swiotlb-xen: ensure we have a single callsite for xen_dma_map_page
Refactor the code a bit to make further changes easier.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2e12dceef3
commit 063b8271ec
@@ -388,13 +388,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-	    (swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+	    swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -407,19 +402,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-	return DMA_MAPPING_ERROR;
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
+
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*
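For context, this is the tail of xen_swiotlb_map_page() as it reads after the patch, assembled from the added lines of the two hunks above; the bounce-buffer allocation between the hunks is untouched by the patch and elided here. Both the direct-mapping fast path and the bounce-buffer path now fall through to the single xen_dma_map_page() call at the done: label:

	/* Fast path: the page is directly DMA-addressable, no bouncing needed. */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/* ... bounce buffer allocated into 'map' (unchanged code, elided) ... */

	dev_addr = xen_phys_to_bus(map);

	/* Ensure that the address returned is DMA'ble. */
	if (unlikely(!dma_capable(dev, dev_addr, size))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

	/*
	 * Point page/offset at the bounce buffer so the shared call below
	 * operates on the memory that will actually be DMA'd to/from.
	 */
	page = pfn_to_page(map >> PAGE_SHIFT);
	offset = map & ~PAGE_MASK;
done:
	/*
	 * we are not interested in the dma_addr returned by xen_dma_map_page,
	 * only in the potential cache flushes executed by the function.
	 */
	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
	return dev_addr;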