iommu/vt-d: Use dma_direct for bypass devices

The intel-iommu driver currently has a partial reimplementation
of the direct mapping code for devices that use pass through
mode.  Replace that code with calls to the relevant dma_direct
routines at the highest level.  This means we have exactly the
same behavior as the dma direct code itself, and can prepare for
eventually only attaching the intel_iommu ops to devices that
actually need dynamic iommu mappings.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
hifive-unleashed-5.2
Christoph Hellwig 2019-04-10 18:14:07 +02:00 committed by Joerg Roedel
parent 48b2c937ea
commit 9cc0c2af8d
1 changed file with 31 additions and 40 deletions
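
At its core the change is mechanical: each dma_map_ops callback now dispatches once, at the top, between the IOMMU path and dma-direct, instead of reimplementing pieces of the direct path inside the driver. The shape of that dispatch can be sketched as a standalone program, below. This is a minimal illustration, not kernel code: `struct device`, `need_iommu_mapping()`, `iommu_map()`, and `direct_map()` are hypothetical stand-ins for the kernel's struct device, iommu_need_mapping(), __intel_map_single(), and dma_direct_map_page().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Hypothetical stand-in for struct device: just tracks whether the
 * device is in IOMMU pass-through (bypass) mode. */
struct device {
	bool passthrough;
};

/* Stand-in for iommu_need_mapping(): bypass devices skip translation. */
static bool need_iommu_mapping(struct device *dev)
{
	return !dev->passthrough;
}

/* Stand-in for __intel_map_single(): pretend to allocate an IOVA. */
static dma_addr_t iommu_map(struct device *dev, uint64_t paddr)
{
	(void)dev;
	(void)paddr;
	return 0xfee00000ULL;	/* fake IOVA */
}

/* Stand-in for dma_direct_map_page(): DMA address == physical address. */
static dma_addr_t direct_map(struct device *dev, uint64_t paddr)
{
	(void)dev;
	return paddr;
}

/* The shape of intel_map_page() after this commit: decide once at the
 * top level which path the device needs, then delegate. */
static dma_addr_t map_page(struct device *dev, uint64_t paddr)
{
	if (need_iommu_mapping(dev))
		return iommu_map(dev, paddr);
	return direct_map(dev, paddr);
}

int main(void)
{
	struct device translated = { .passthrough = false };
	struct device bypass = { .passthrough = true };

	printf("translated: %#llx\n",
	       (unsigned long long)map_page(&translated, 0x1000));
	printf("bypass:     %#llx\n",
	       (unsigned long long)map_page(&bypass, 0x1000));
	return 0;
}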

drivers/iommu/intel-iommu.c

@@ -3657,9 +3657,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (!iommu_need_mapping(dev))
-		return paddr;
-
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return DMA_MAPPING_ERROR;
@@ -3708,15 +3705,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 				 enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-				  dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, page_to_phys(page) + offset,
+				size, dir, *dev->dma_mask);
+	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
 				     size_t size, enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+	if (iommu_need_mapping(dev))
+		return __intel_map_single(dev, phys_addr, size, dir,
+				*dev->dma_mask);
+	return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3728,9 +3730,6 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 	struct intel_iommu *iommu;
 	struct page *freelist;
 
-	if (!iommu_need_mapping(dev))
-		return;
-
 	domain = find_domain(dev);
 	BUG_ON(!domain);
 
@@ -3766,7 +3765,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 			     size_t size, enum dma_data_direction dir,
 			     unsigned long attrs)
 {
-	intel_unmap(dev, dev_addr, size);
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
+	else
+		dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (iommu_need_mapping(dev))
+		intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3776,28 +3785,18 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	int order;
 
+	if (!iommu_need_mapping(dev))
+		return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
-
-	if (iommu_need_mapping(dev))
-		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
-		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-			flags |= GFP_DMA;
-		else
-			flags |= GFP_DMA32;
-	}
+	flags &= ~(GFP_DMA | GFP_DMA32);
 
 	if (gfpflags_allow_blocking(flags)) {
 		unsigned int count = size >> PAGE_SHIFT;
 
 		page = dma_alloc_from_contiguous(dev, count, order,
 						 flags & __GFP_NOWARN);
-		if (page && !iommu_need_mapping(dev) &&
-		    page_to_phys(page) + size > dev->coherent_dma_mask) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
 	}
 
 	if (!page)
@@ -3823,6 +3822,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 	int order;
 	struct page *page = virt_to_page(vaddr);
 
+	if (!iommu_need_mapping(dev))
+		return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
@@ -3840,6 +3842,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
+	if (!iommu_need_mapping(dev))
+		return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
 	for_each_sg(sglist, sg, nelems, i) {
 		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
 	}
@@ -3847,20 +3852,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
-static int intel_nontranslate_map_sg(struct device *hddev,
-			struct scatterlist *sglist, int nelems, int dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i) {
-		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
-		sg->dma_length = sg->length;
-	}
-	return nelems;
-}
-
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, unsigned long attrs)
 {
@@ -3876,7 +3867,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	if (!iommu_need_mapping(dev))
-		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+		return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
 
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
@@ -3926,7 +3917,7 @@ static const struct dma_map_ops intel_dma_ops = {
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
 	.map_resource = intel_map_resource,
-	.unmap_resource = intel_unmap_page,
+	.unmap_resource = intel_unmap_resource,
 	.dma_supported = dma_direct_supported,
 };
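
One detail worth noting in the last hunk: .unmap_resource used to be wired to intel_unmap_page, which with this patch would have sent bypass devices into dma_direct_unmap_page() for a mapping that dma_direct_map_resource() never set up. The new intel_unmap_resource() therefore has no direct-path branch at all: a direct resource mapping is just the physical address, so there is nothing to tear down. Continuing the hypothetical standalone sketch above (same stand-in names, not kernel API):

/* Stand-in for intel_unmap(): pretend to release the IOVA. */
static void iommu_unmap(struct device *dev, dma_addr_t addr)
{
	(void)dev;
	(void)addr;
}

/* The shape of the new intel_unmap_resource(): only the IOMMU case has
 * state to undo; the direct path keeps no state for resource mappings,
 * so the bypass case is deliberately a no-op. */
static void unmap_resource(struct device *dev, dma_addr_t addr)
{
	if (need_iommu_mapping(dev))
		iommu_unmap(dev, addr);
}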