
iommu/io-pgtable: Pass struct iommu_iotlb_gather to ->tlb_add_page()

With all the pieces in place, we can finally propagate the
iommu_iotlb_gather structure from the call to unmap() down to the IOMMU
drivers' implementation of ->tlb_add_page(). Currently everybody ignores
it, but the machinery is now there to defer invalidation.

Signed-off-by: Will Deacon <will@kernel.org>
Branch: alistair/sunxi64-5.4-dsi
Author: Will Deacon
Date:   2019-07-02 16:45:15 +01:00
Parent: a2d3a382d6
Commit: 3951c41af4

8 changed files with 47 additions and 29 deletions
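
The commit message notes that the machinery to defer invalidation is now in place even though every driver still ignores the gather argument. As a hedged sketch of what a deferring implementation could look like — the my_iommu_* names are hypothetical, and iommu_iotlb_gather_add_page() is the batching helper introduced earlier in this series:

static void my_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				  unsigned long iova, size_t granule,
				  void *cookie)
{
	/* Hypothetical driver whose io-pgtable cookie is its domain. */
	struct my_iommu_domain *my_domain = cookie;

	/*
	 * Record the page in the gather instead of invalidating now;
	 * iommu_tlb_sync() later flushes the accumulated range.
	 */
	iommu_iotlb_gather_add_page(&my_domain->domain, gather, iova, granule);
}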

drivers/iommu/arm-smmu-v3.c

@@ -1596,7 +1596,8 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
-static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule,
+static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
+					 unsigned long iova, size_t granule,
 					 void *cookie)
 {
 	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);

drivers/iommu/arm-smmu.c

@@ -574,7 +574,8 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
 		ops->tlb_sync(cookie);
 }
 
-static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
+static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
+				  unsigned long iova, size_t granule,
 				  void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;

drivers/iommu/io-pgtable-arm-v7s.c

@@ -362,7 +362,8 @@ static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
 	return false;
 }
 
-static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
+			      struct iommu_iotlb_gather *, unsigned long,
 			      size_t, int, arm_v7s_iopte *);
 
 static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
@@ -383,7 +384,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
 			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
 
 			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
-			if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
+			if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
 						    sz, lvl, tblp) != sz))
 				return -EINVAL;
 		} else if (ptep[i]) {
@@ -545,6 +546,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 }
 
 static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+				      struct iommu_iotlb_gather *gather,
 				      unsigned long iova, size_t size,
 				      arm_v7s_iopte blk_pte,
 				      arm_v7s_iopte *ptep)
@@ -581,14 +583,15 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
 			return 0;
 
 		tablep = iopte_deref(pte, 1);
-		return __arm_v7s_unmap(data, iova, size, 2, tablep);
+		return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
 	}
 
-	io_pgtable_tlb_add_page(&data->iop, iova, size);
+	io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
 	return size;
 }
 
 static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+			      struct iommu_iotlb_gather *gather,
 			      unsigned long iova, size_t size, int lvl,
 			      arm_v7s_iopte *ptep)
 {
@@ -647,7 +650,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 				 */
 				smp_wmb();
 			} else {
-				io_pgtable_tlb_add_page(iop, iova, blk_size);
+				io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
 			}
 			iova += blk_size;
 		}
@@ -657,12 +660,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 		 * Insert a table at the next level to map the old region,
 		 * minus the part we want to unmap
 		 */
-		return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep);
+		return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
+					       ptep);
 	}
 
 	/* Keep on walkin' */
 	ptep = iopte_deref(pte[0], lvl);
-	return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+	return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
 }
 
 static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
@@ -673,7 +677,7 @@ static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(upper_32_bits(iova)))
 		return 0;
 
-	return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+	return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
 }
 
 static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
@@ -808,7 +812,8 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+			       unsigned long iova, size_t granule, void *cookie)
 {
 	dummy_tlb_flush(iova, granule, granule, cookie);
 }

drivers/iommu/io-pgtable-arm.c

@@ -289,6 +289,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+			       struct iommu_iotlb_gather *gather,
 			       unsigned long iova, size_t size, int lvl,
 			       arm_lpae_iopte *ptep);
@@ -334,9 +335,11 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
 		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
+			WARN_ON(1);
 			return -EINVAL;
+		}
 	}
 
 	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
 	return 0;
@@ -536,6 +539,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 }
 
 static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+				       struct iommu_iotlb_gather *gather,
 				       unsigned long iova, size_t size,
 				       arm_lpae_iopte blk_pte, int lvl,
 				       arm_lpae_iopte *ptep)
@@ -581,14 +585,15 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
 		tablep = iopte_deref(pte, data);
 	} else if (unmap_idx >= 0) {
-		io_pgtable_tlb_add_page(&data->iop, iova, size);
+		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
 		return size;
 	}
 
-	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
+	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+			       struct iommu_iotlb_gather *gather,
 			       unsigned long iova, size_t size, int lvl,
 			       arm_lpae_iopte *ptep)
 {
@@ -622,7 +627,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			 */
 			smp_wmb();
 		} else {
-			io_pgtable_tlb_add_page(iop, iova, size);
+			io_pgtable_tlb_add_page(iop, gather, iova, size);
 		}
 
 		return size;
@@ -631,13 +636,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		 * Insert a table at the next level to map the old region,
 		 * minus the part we want to unmap
 		 */
-		return arm_lpae_split_blk_unmap(data, iova, size, pte,
+		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
 						lvl + 1, ptep);
 	}
 
 	/* Keep on walkin' */
 	ptep = iopte_deref(pte, data);
-	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
 }
 
 static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
@@ -650,7 +655,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
 		return 0;
 
-	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
+	return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
 }
 
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -1074,7 +1079,8 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
 	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
-static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie)
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+			       unsigned long iova, size_t granule, void *cookie)
 {
 	dummy_tlb_flush(iova, granule, granule, cookie);
 }

drivers/iommu/msm_iommu.c

@@ -180,7 +180,8 @@ static void __flush_iotlb_leaf(unsigned long iova, size_t size,
 	__flush_iotlb_range(iova, size, granule, true, cookie);
 }
 
-static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie)
+static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
+			       unsigned long iova, size_t granule, void *cookie)
 {
 	__flush_iotlb_range(iova, granule, granule, true, cookie);
 }

drivers/iommu/mtk_iommu.c

@@ -202,7 +202,8 @@ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
 	mtk_iommu_tlb_sync(cookie);
 }
 
-static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule,
+static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
+					    unsigned long iova, size_t granule,
 					    void *cookie)
 {
 	mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);

drivers/iommu/qcom_iommu.c

@@ -178,7 +178,8 @@ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
 	qcom_iommu_tlb_sync(cookie);
 }
 
-static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule,
+static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+				    unsigned long iova, size_t granule,
 				    void *cookie)
 {
 	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);

include/linux/io-pgtable.h

@@ -28,10 +28,10 @@ enum io_pgtable_fmt {
  * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
  *                  address range.
  * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
- *                  single page. This function exists purely as an optimisation
- *                  for IOMMUs that cannot batch TLB invalidation operations
- *                  efficiently and are therefore better suited to issuing them
- *                  early rather than deferring them until iommu_tlb_sync().
+ *                  single page. IOMMUs that cannot batch TLB invalidation
+ *                  operations efficiently will typically issue them here, but
+ *                  others may decide to update the iommu_iotlb_gather structure
+ *                  and defer the invalidation until iommu_tlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -42,7 +42,8 @@ struct iommu_flush_ops {
 			       void *cookie);
 	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
 			       void *cookie);
-	void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
+	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+			     unsigned long iova, size_t granule, void *cookie);
 };
 
 /**
@@ -209,11 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
 }
 
 static inline void
-io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova,
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+			struct iommu_iotlb_gather * gather, unsigned long iova,
 			size_t granule)
 {
 	if (iop->cfg.tlb->tlb_add_page)
-		iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
+		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
 }
 
 /**
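
To recap the updated contract: the io-pgtable code calls io_pgtable_tlb_add_page() for each unmapped leaf entry, and the driver callback decides whether to invalidate immediately or to update the gather and defer. A minimal sketch of the immediate-issue style used by the drivers in this commit, with my_* placeholders standing in for a real driver's helpers:

static void my_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule, void *cookie)
{
	/* gather is deliberately unused: invalidate the single page now. */
	my_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops my_flush_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_flush_walk	= my_tlb_flush_walk,
	.tlb_flush_leaf	= my_tlb_flush_leaf,
	.tlb_add_page	= my_tlb_add_page,
};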