
mm: remove hmm_vma_alloc_locked_page

The only user of it has just been removed, and there wasn't really any need
to wrap a basic memory allocator to start with.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Christoph Hellwig 2019-06-26 14:27:19 +02:00 committed by Jason Gunthorpe
parent 4239f267e3
commit 47e9d836a5
2 changed files with 0 additions and 17 deletions

include/linux/hmm.h

@@ -587,9 +587,6 @@ static inline void hmm_mm_init(struct mm_struct *mm) {}
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 struct hmm_devmem;
 
-struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
-				       unsigned long addr);
-
 /*
  * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events
  *

mm/hmm.c

@@ -1330,20 +1330,6 @@ EXPORT_SYMBOL(hmm_range_dma_unmap);
 
 
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
-				       unsigned long addr)
-{
-	struct page *page;
-
-	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
-	if (!page)
-		return NULL;
-	lock_page(page);
-	return page;
-}
-EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
-
-
 static void hmm_devmem_ref_release(struct percpu_ref *ref)
 {
 	struct hmm_devmem *devmem;
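
For context, the removed helper only wrapped two generic calls. Below is a minimal sketch (not part of this commit; the function name is illustrative only) of what a caller needing a locked page would open-code instead: allocate the page against the faulting VMA with alloc_page_vma() and lock it before handing it back.

/*
 * Illustrative sketch only, not from this commit: the open-coded
 * equivalent of the removed wrapper.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

static struct page *alloc_locked_page_for_vma(struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;

	/* Allocate a GFP_HIGHUSER page with NUMA placement for this VMA. */
	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;

	/* Hand the page back locked; the caller must unlock_page() it. */
	lock_page(page);
	return page;
}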