
mm/hmm: remove superfluous arguments from hmm_range_register

The start, end and page_shift values are all saved in the range structure,
so we might as well use that for argument passing.
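
As an illustration of the new calling convention, a caller now fills in the
range structure and passes only the range and the mirror. The sketch below is
not taken from any driver: the helper name "example_range_register" and its
"start"/"npages" parameters are made up for this example, and a real caller
would also set pfns, flags and values before faulting:

    /* Sketch only: assumes <linux/hmm.h> and an already registered mirror. */
    static int example_range_register(struct hmm_mirror *mirror,
                                      struct hmm_range *range,
                                      unsigned long start,
                                      unsigned long npages)
    {
            range->page_shift = PAGE_SHIFT;
            range->start = start;
            range->end = start + npages * PAGE_SIZE;

            /* start, end and page_shift are now read from *range itself. */
            return hmm_range_register(range, mirror);
    }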

Link: https://lore.kernel.org/r/20190806160554.14046-7-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Christoph Hellwig 2019-08-06 19:05:44 +03:00 committed by Jason Gunthorpe
parent 2cbeb41913
commit fac555ac93
5 changed files with 14 additions and 26 deletions

Documentation/vm/hmm.rst

@@ -222,7 +222,7 @@ The usage pattern is::
       range.flags = ...;
       range.values = ...;
       range.pfn_shift = ...;
-      hmm_range_register(&range);
+      hmm_range_register(&range, mirror);
 
       /*
        * Just wait for range to be valid, safe to ignore return value as we

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -818,8 +818,11 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
                0 : range->flags[HMM_PFN_WRITE];
        range->pfn_flags_mask = 0;
        range->pfns = pfns;
-       hmm_range_register(range, mirror, start,
-                          start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
+       range->page_shift = PAGE_SHIFT;
+       range->start = start;
+       range->end = start + ttm->num_pages * PAGE_SIZE;
+
+       hmm_range_register(range, mirror);
 
        /*
         * Just wait for range to be valid, safe to ignore return value as we

drivers/gpu/drm/nouveau/nouveau_svm.c

@@ -492,9 +492,7 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
        range->default_flags = 0;
        range->pfn_flags_mask = -1UL;
 
-       ret = hmm_range_register(range, &svmm->mirror,
-                                range->start, range->end,
-                                PAGE_SHIFT);
+       ret = hmm_range_register(range, &svmm->mirror);
        if (ret) {
                up_read(&svmm->mm->mmap_sem);
                return (int)ret;
@@ -682,6 +680,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
                         args.i.p.addr + args.i.p.size, fn - fi);
 
                /* Have HMM fault pages within the fault window to the GPU. */
+               range.page_shift = PAGE_SHIFT;
                range.start = args.i.p.addr;
                range.end = args.i.p.addr + args.i.p.size;
                range.pfns = args.phys;

include/linux/hmm.h

@@ -400,11 +400,7 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
 /*
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
-int hmm_range_register(struct hmm_range *range,
-                      struct hmm_mirror *mirror,
-                      unsigned long start,
-                      unsigned long end,
-                      unsigned page_shift);
+int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
 void hmm_range_unregister(struct hmm_range *range);
 
 /*

mm/hmm.c

@@ -850,35 +850,25 @@ static void hmm_pfns_clear(struct hmm_range *range,
  * hmm_range_register() - start tracking change to CPU page table over a range
  * @range: range
  * @mm: the mm struct for the range of virtual address
- * @start: start virtual address (inclusive)
- * @end: end virtual address (exclusive)
- * @page_shift: expect page shift for the range
+ *
  * Return: 0 on success, -EFAULT if the address space is no longer valid
  *
  * Track updates to the CPU page table see include/linux/hmm.h
  */
-int hmm_range_register(struct hmm_range *range,
-                      struct hmm_mirror *mirror,
-                      unsigned long start,
-                      unsigned long end,
-                      unsigned page_shift)
+int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
 {
-       unsigned long mask = ((1UL << page_shift) - 1UL);
+       unsigned long mask = ((1UL << range->page_shift) - 1UL);
        struct hmm *hmm = mirror->hmm;
        unsigned long flags;
 
        range->valid = false;
        range->hmm = NULL;
 
-       if ((start & mask) || (end & mask))
+       if ((range->start & mask) || (range->end & mask))
                return -EINVAL;
-       if (start >= end)
+       if (range->start >= range->end)
                return -EINVAL;
 
-       range->page_shift = page_shift;
-       range->start = start;
-       range->end = end;
-
        /* Prevent hmm_release() from running while the range is valid */
        if (!mmget_not_zero(hmm->mm))
                return -EFAULT;
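
The validation above keeps its old semantics and merely reads the values out
of the structure: start and end must be aligned to 1UL << range->page_shift
and must describe a non-empty range. A stand-alone sketch of that rule
(userspace C with hypothetical addresses, returning -1 where the kernel
returns -EINVAL):

    #include <stdio.h>

    /* Mirrors the alignment/ordering check in hmm_range_register(). */
    static int check_range(unsigned long start, unsigned long end,
                           unsigned int page_shift)
    {
            unsigned long mask = (1UL << page_shift) - 1UL;

            if ((start & mask) || (end & mask))
                    return -1;      /* kernel: -EINVAL */
            if (start >= end)
                    return -1;      /* kernel: -EINVAL */
            return 0;
    }

    int main(void)
    {
            /* 4 KiB pages: page_shift 12, mask 0xfff. */
            printf("%d\n", check_range(0x1200UL, 0x3000UL, 12)); /* -1: start unaligned */
            printf("%d\n", check_range(0x1000UL, 0x3000UL, 12)); /*  0: aligned, non-empty */
            return 0;
    }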