drm/etnaviv: iommuv1: fold pagetable alloc and free into caller
Those functions are simple enough to fold them into the calling function. This also fixes a correctness issue, as the alloc/free functions didn't specify the device the memory was allocated for. Signed-off-by: Lucas Stach <l.stach@pengutronix.de> Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de> Reviewed-By: Wladimir J. van der Laan <laanwj@gmail.com>
This commit is contained in:
parent
87ceb37560
commit
1a540490e9
|
@ -50,22 +50,6 @@ static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domai
|
||||||
return container_of(domain, struct etnaviv_iommu_domain, domain);
|
return container_of(domain, struct etnaviv_iommu_domain, domain);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
|
|
||||||
size_t size)
|
|
||||||
{
|
|
||||||
pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
|
|
||||||
if (!pgtable->pgtable)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
|
|
||||||
size_t size)
|
|
||||||
{
|
|
||||||
dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
|
static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
|
||||||
unsigned long iova, phys_addr_t paddr)
|
unsigned long iova, phys_addr_t paddr)
|
||||||
{
|
{
|
||||||
|
@ -78,7 +62,7 @@ static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
|
||||||
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
|
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
|
||||||
{
|
{
|
||||||
u32 *p;
|
u32 *p;
|
||||||
int ret, i;
|
int i;
|
||||||
|
|
||||||
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
|
etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
|
||||||
SZ_4K,
|
SZ_4K,
|
||||||
|
@ -91,12 +75,15 @@ static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
|
||||||
for (i = 0; i < SZ_4K / 4; i++)
|
for (i = 0; i < SZ_4K / 4; i++)
|
||||||
*p++ = 0xdead55aa;
|
*p++ = 0xdead55aa;
|
||||||
|
|
||||||
ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
|
etnaviv_domain->pgtable.pgtable =
|
||||||
if (ret < 0) {
|
dma_alloc_coherent(etnaviv_domain->dev, PT_SIZE,
|
||||||
|
&etnaviv_domain->pgtable.paddr,
|
||||||
|
GFP_KERNEL);
|
||||||
|
if (!etnaviv_domain->pgtable.pgtable) {
|
||||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||||
etnaviv_domain->bad_page_cpu,
|
etnaviv_domain->bad_page_cpu,
|
||||||
etnaviv_domain->bad_page_dma);
|
etnaviv_domain->bad_page_dma);
|
||||||
return ret;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < PT_ENTRIES; i++)
|
for (i = 0; i < PT_ENTRIES; i++)
|
||||||
|
@ -112,7 +99,9 @@ static void etnaviv_domain_free(struct iommu_domain *domain)
|
||||||
{
|
{
|
||||||
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
||||||
|
|
||||||
pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
|
dma_free_coherent(etnaviv_domain->dev, PT_SIZE,
|
||||||
|
etnaviv_domain->pgtable.pgtable,
|
||||||
|
etnaviv_domain->pgtable.paddr);
|
||||||
|
|
||||||
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
||||||
etnaviv_domain->bad_page_cpu,
|
etnaviv_domain->bad_page_cpu,
|
||||||
|
|
Loading…
Reference in a new issue