dma-mapping updates for 5.8, part 2

- add DMA mapping helpers for struct sg_table (Marek Szyprowski)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl7bvogLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYNOtg/9Fv/ahcj/ux+KSE858TcwTxMlo9Jqb8jMFBsJd0wT
 tpnllOsJwjXw7S50wfyACAGHRI8HSjsbga0ogxx2hZDghxtGILhYumhPAxGA4N3i
 eNoe7KtFld5akHF1re/8Cs/eaGTG2q4mk3U16pB6jcMNEo8y60h2HYnxJeIZyskb
 m02pdMCHJxAcsXj/s3SjJWa05x+el7SmJLM7ns8dKnmbAq3u+cfR7AdwoiO1/yOi
 zzrwEo3nfqDyRjKf+UhUN8gz1yRi/i08d+AnDZvDFVizDGaZM+GmqXy7PQbAn+Wq
 wUtwodZqLFnHxRhK+iofjwPnb1Lu34oc0/z9tMP+UdIa4hFrUYwGvE5UmD1HBo/f
 WFSEEofTP6BNlJwUm4enTxJKYieIqQ8f1fiTDmmmUB1XFXOn1v4hD/vH81PHAoQx
 GEXD6QubYSM4qTJjHU8dHcN4kFUlfgbK7KbY1y57msIBdpXAP2TBekvc1k9qj13Q
 M/wIcmHUgTaqBoEz/VIk/SN6msIgcmvOoWGkTspZPw1LkAk2guliNJlkwAk48cz7
 p4++GIaHl+CqsiG+fXQReBvjhdJjiwSQ61GI7pkql9dHignfDsMugn3FSBs0WSKa
 tnEIF4c+6T+A4jaiUTLJ/BDNXQU2glmSxXoi2MOnjwgSjO8BFCZptzkttnk/x5a5
 A6c=
 =LLjY
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.8-2' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping helpers from Christoph Hellwig:
 "These were in a separate stable branch so that various media and drm
  trees could pull them in for bug fixes, but looking at linux-next that
  hasn't actually happened yet. I'm still sending the APIs to you in the
  hope that these bug fixes get picked up for 5.8 one way or another.

  Summary:

   - add DMA mapping helpers for struct sg_table (Marek Szyprowski)"

* tag 'dma-mapping-5.8-2' of git://git.infradead.org/users/hch/dma-mapping:
  iommu: add generic helper for mapping sgtable objects
  scatterlist: add generic wrappers for iterating over sgtable objects
  dma-mapping: add generic helpers for mapping sgtable objects
Linus Torvalds, 2020-06-06 11:55:53 -07:00
commit 6f2dc3d335
3 changed files with 143 additions and 3 deletions

include/linux/dma-mapping.h

@@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
        return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership of the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer again.
 *
 * Returns 0 on success, or -EINVAL if mapping the buffer failed.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        int nents;

        nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
        if (nents <= 0)
                return -EINVAL;
        sgt->nents = nents;
        return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so the CPU can safely access it. Before
 * doing any further DMA operations, one has to transfer the ownership of
 * the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
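
For drivers, the win is that these helpers hide the nents vs. orig_nents
distinction that direct dma_map_sg()/dma_unmap_sg() callers routinely get
wrong. A minimal usage sketch, not part of this patch; the function name
and the transfer-completion step are hypothetical:

#include <linux/dma-mapping.h>

static int stream_from_device(struct device *dev, struct sg_table *sgt)
{
        int ret;

        /* CPU -> DMA ownership; maps orig_nents entries, stores nents. */
        ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
        if (ret)
                return ret;     /* -EINVAL, nothing was mapped */

        /* ... program the hardware and wait for the transfer ... */

        /* DMA -> CPU ownership; the CPU may now read the buffer. */
        dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
        return 0;
}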

include/linux/iommu.h

@@ -466,6 +466,22 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
                        iommu_fault_handler_t handler, void *token);

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
                unsigned long iova, struct sg_table *sgt, int prot)
{
        return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
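
As with iommu_map_sg(), the return value here is the number of bytes
actually mapped, not an errno, so 0 signals failure. A hedged sketch
(helper name hypothetical, domain assumed to be already attached):

#include <linux/iommu.h>

static int map_buffer_at_iova(struct iommu_domain *domain,
                              unsigned long iova, struct sg_table *sgt)
{
        size_t mapped;

        mapped = iommu_map_sgtable(domain, iova, sgt,
                                   IOMMU_READ | IOMMU_WRITE);
        if (!mapped)
                return -EINVAL; /* nothing could be mapped */

        /* ... hand @iova to the device ... */
        return 0;
}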

include/linux/scatterlist.h

@@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i)        \
        for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

/*
 * Loop over each sg element in the given sg_table object.
 */
#define for_each_sgtable_sg(sgt, sg, i)         \
        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)

/*
 * Loop over each sg element in the given *DMA mapped* sg_table object.
 * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract the DMA
 * address and length of each element.
 */
#define for_each_sgtable_dma_sg(sgt, sg, i)     \
        for_each_sg(sgt->sgl, sg, sgt->nents, i)
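
/*
 * Usage sketch, not part of this patch (sgtable_dump() is hypothetical):
 * the plain walk visits all sgt->orig_nents CPU-side entries, while the
 * DMA walk visits only the sgt->nents entries left after mapping, which
 * an IOMMU may have merged.
 */
static inline void sgtable_dump(struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sgtable_sg(sgt, sg, i)         /* CPU view */
                pr_debug("cpu seg %d: len %u\n", i, sg->length);

        for_each_sgtable_dma_sg(sgt, sg, i)     /* device view */
                pr_debug("dma seg %d: len %u\n", i, sg_dma_len(sg));
}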

/**
 * sg_chain - Chain two sglists together
 * @prv: First scatterlist
@@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
 * @sglist: sglist to iterate over
 * @piter: page iterator to hold current page, sg, sg_pgoffset
 * @nents: maximum number of sg entries to iterate over
 * @pgoffset: starting page offset (in pages)
 *
 * Callers may use sg_page_iter_page() to get each page pointer.
 * Each iteration operates on one PAGE_SIZE unit.
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
        for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
@@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
 * for_each_sg_dma_page - iterate over the pages of the given sg list
 * @sglist: sglist to iterate over
 * @dma_iter: DMA page iterator to hold current page
 * @dma_nents: maximum number of sg entries to iterate over, this is the
 *             value returned from dma_map_sg
 * @pgoffset: starting page offset (in pages)
 *
 * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
 * Each iteration operates on one PAGE_SIZE unit.
 */
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
        for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
                                  pgoffset); \
             __sg_page_iter_dma_next(dma_iter);)

/**
 * for_each_sgtable_page - iterate over all pages in the sg_table object
 * @sgt: sg_table object to iterate over
 * @piter: page iterator to hold current page
 * @pgoffset: starting page offset (in pages)
 *
 * Iterates over all memory pages in the buffer described by a scatterlist
 * stored in the given sg_table object.
 * See also for_each_sg_page(). Each iteration operates on one PAGE_SIZE unit.
 */
#define for_each_sgtable_page(sgt, piter, pgoffset) \
        for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)

/**
 * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
 * @sgt: sg_table object to iterate over
 * @dma_iter: DMA page iterator to hold current page
 * @pgoffset: starting page offset (in pages)
 *
 * Iterates over all DMA mapped pages in the buffer described by a scatterlist
 * stored in the given sg_table object.
 * See also for_each_sg_dma_page(). Each iteration operates on one PAGE_SIZE
 * unit.
 */
#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
        for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
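
/*
 * Usage sketch, not part of this patch (sgtable_dump_pages() is
 * hypothetical): page-granular walks over the same table. The DMA
 * variant is only valid after a successful dma_map_sgtable().
 */
static inline void sgtable_dump_pages(struct sg_table *sgt)
{
        struct sg_page_iter piter;
        struct sg_dma_page_iter dma_iter;
        dma_addr_t addr;

        for_each_sgtable_page(sgt, &piter, 0)
                pr_debug("page %p\n", sg_page_iter_page(&piter));

        for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
                addr = sg_page_iter_dma_address(&dma_iter);
                pr_debug("dma addr %pad\n", &addr);
        }
}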

/*
 * Mapping sg iterator
 *