mm: optimize dev_pagemap reference counting around get_dev_pagemap

Change the calling convention so that get_dev_pagemap always consumes the
previous reference, instead of requiring callers to drop it with an explicit
earlier call to put_dev_pagemap.

The callers will still need to put the final reference after finishing the
loop over the pages.
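In caller terms, the new convention looks roughly like the sketch below. This
is an illustrative fragment, not code from the patch; start_pfn, end_pfn, and
the loop body are placeholder names:

	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/* consumes the reference taken on the previous iteration */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;	/* lookup failed; the old reference is already gone */
		/* ... operate on the page backing pfn ... */
	}
	/* put the final reference once the loop is done */
	if (pgmap)
		put_dev_pagemap(pgmap);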

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Christoph Hellwig 2017-12-29 08:54:01 +01:00 committed by Dan Williams
parent 0822acb86c
commit 832d7aa051
2 changed files with 14 additions and 10 deletions

kernel/memremap.c

@@ -507,22 +507,23 @@ struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
  * @pfn: page frame number to lookup page_map
  * @pgmap: optional known pgmap that already has a reference
  *
- * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
- * same mapping.
+ * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
+ * is non-NULL but does not cover @pfn the reference to it will be released.
  */
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 		struct dev_pagemap *pgmap)
 {
-	const struct resource *res = pgmap ? pgmap->res : NULL;
 	resource_size_t phys = PFN_PHYS(pfn);
 
 	/*
-	 * In the cached case we're already holding a live reference so
-	 * we can simply do a blind increment
+	 * In the cached case we're already holding a live reference.
 	 */
-	if (res && phys >= res->start && phys <= res->end) {
-		percpu_ref_get(pgmap->ref);
-		return pgmap;
+	if (pgmap) {
+		const struct resource *res = pgmap ? pgmap->res : NULL;
+
+		if (res && phys >= res->start && phys <= res->end)
+			return pgmap;
+		put_dev_pagemap(pgmap);
 	}
 
 	/* fall back to slow path lookup */
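The cached hit above no longer takes a blind percpu_ref_get: the function
simply hands the caller's existing reference back, and a miss releases that
stale reference before the slow-path lookup acquires a new one. For context,
put_dev_pagemap at this point in the tree is a thin wrapper, approximately as
follows (paraphrased from include/linux/memremap.h of this era, not part of
the diff):

	static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
	{
		/* NULL-safe, which keeps caller error paths simple */
		if (pgmap)
			percpu_ref_put(pgmap->ref);
	}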

mm/gup.c

@@ -1410,7 +1410,6 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 
-		put_dev_pagemap(pgmap);
 		SetPageReferenced(page);
 		pages[*nr] = page;
 		(*nr)++;
@@ -1420,6 +1419,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 	ret = 1;
 
 pte_unmap:
+	if (pgmap)
+		put_dev_pagemap(pgmap);
 	pte_unmap(ptem);
 	return ret;
 }
@@ -1459,10 +1460,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 		SetPageReferenced(page);
 		pages[*nr] = page;
 		get_page(page);
-		put_dev_pagemap(pgmap);
 		(*nr)++;
 		pfn++;
 	} while (addr += PAGE_SIZE, addr != end);
+
+	if (pgmap)
+		put_dev_pagemap(pgmap);
 	return 1;
 }