
dma-mapping: introduce a dma_common_find_pages helper

A helper to find the backing page array based on a virtual address.
This also ensures we do the same vm_flags check everywhere instead
of slightly different or missing ones in a few places.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Branch: alistair/sunxi64-5.4-dsi
Author: Christoph Hellwig 2019-06-03 09:14:31 +02:00
Parent: 512317401f
Commit: 5cf4537975
4 changed files with 16 additions and 20 deletions
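In rough before/after terms (an illustrative sketch only, not part of the commit itself), callers previously looked up the vmalloc area themselves and applied their own, sometimes missing, flags check; the helper centralizes both steps:

	/* Old open-coded pattern (sketch): each caller did its own lookup and
	 * its own (sometimes different or missing) vm_flags check. */
	struct vm_struct *area = find_vm_area(cpu_addr);
	struct page **pages = NULL;

	if (area && (area->flags & VM_DMA_COHERENT))
		pages = area->pages;

	/* New pattern: one helper performs the lookup and the flags check. */
	pages = dma_common_find_pages(cpu_addr);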

arch/arm/mm/dma-mapping.c

@@ -1439,18 +1439,13 @@ static struct page **__atomic_get_pages(void *addr)
 
 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 {
-	struct vm_struct *area;
-
 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
 		return __atomic_get_pages(cpu_addr);
 
 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
 		return cpu_addr;
 
-	area = find_vm_area(cpu_addr);
-	if (area && (area->flags & VM_DMA_COHERENT))
-		return area->pages;
-	return NULL;
+	return dma_common_find_pages(cpu_addr);
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,

drivers/iommu/dma-iommu.c

@@ -541,15 +541,6 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	return pages;
 }
 
-static struct page **__iommu_dma_get_pages(void *cpu_addr)
-{
-	struct vm_struct *area = find_vm_area(cpu_addr);
-
-	if (!area || !area->pages)
-		return NULL;
-	return area->pages;
-}
-
 /**
  * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
  * @dev: Device to allocate memory for. Must be a real device
@@ -938,7 +929,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	 * If it the address is remapped, then it's either non-coherent
 	 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
 	 */
-	pages = __iommu_dma_get_pages(cpu_addr);
+	pages = dma_common_find_pages(cpu_addr);
 	if (!pages)
 		page = vmalloc_to_page(cpu_addr);
 	dma_common_free_remap(cpu_addr, alloc_size);
@@ -1045,7 +1036,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
-		struct page **pages = __iommu_dma_get_pages(cpu_addr);
+		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages)
 			return __iommu_dma_mmap(pages, size, vma);
@@ -1067,7 +1058,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	int ret;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
-		struct page **pages = __iommu_dma_get_pages(cpu_addr);
+		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages) {
 			return sg_alloc_table_from_pages(sgt, pages,

include/linux/dma-mapping.h

@@ -626,6 +626,7 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 
+struct page **dma_common_find_pages(void *cpu_addr);
 void *dma_common_contiguous_remap(struct page *page, size_t size,
 			pgprot_t prot, const void *caller);

kernel/dma/remap.c

@@ -11,6 +11,15 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+struct page **dma_common_find_pages(void *cpu_addr)
+{
+	struct vm_struct *area = find_vm_area(cpu_addr);
+
+	if (!area || area->flags != VM_DMA_COHERENT)
+		return NULL;
+	return area->pages;
+}
+
 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
 			size_t size, pgprot_t prot, const void *caller)
 {
@@ -78,9 +87,9 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
  */
 void dma_common_free_remap(void *cpu_addr, size_t size)
 {
-	struct vm_struct *area = find_vm_area(cpu_addr);
+	struct page **pages = dma_common_find_pages(cpu_addr);
 
-	if (!area || area->flags != VM_DMA_COHERENT) {
+	if (!pages) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}