mm/hmm: add missing call to hmm_range_need_fault() before returning EFAULT

All return paths that return -EFAULT must first call hmm_range_need_fault()
to determine whether the user requires this page to be valid.

If the page can never be made valid, even should the user later require it
(because of the vma flags in this case), then the result should be
HMM_PFN_ERROR rather than HMM_PFN_NONE.
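
In code, the required shape is roughly the following (a sketch of the
pattern only, matching the 5.6-era helper that reports through two bool
out-parameters whether the caller asked for any page in the range to be
faulted in):

	bool fault, write_fault;

	/* Ask whether the caller requested that any page in this range be valid. */
	hmm_range_need_fault(hmm_vma_walk,
			     range->pfns + ((start - range->start) >> PAGE_SHIFT),
			     (end - start) >> PAGE_SHIFT, 0, &fault, &write_fault);
	if (fault || write_fault)
		return -EFAULT;	/* required, but can never be made valid here */

	/* Not required: report a permanent error for the range and move on. */
	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);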

Fixes: a3e0d41c2b ("mm/hmm: improve driver API to work and wait over a range")
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Jason Gunthorpe 2020-03-05 12:00:22 -04:00
parent 7d082987e5
commit c2579c9c4a
1 changed file with 8 additions and 11 deletions

mm/hmm.c

@@ -604,18 +604,15 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end,
 	struct vm_area_struct *vma = walk->vma;
 
 	/*
-	 * Skip vma ranges that don't have struct page backing them or
-	 * map I/O devices directly.
-	 */
-	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
-		return -EFAULT;
-
-	/*
+	 * Skip vma ranges that don't have struct page backing them or map I/O
+	 * devices directly.
+	 *
 	 * If the vma does not allow read access, then assume that it does not
-	 * allow write access either. HMM does not support architectures
-	 * that allow write without read.
+	 * allow write access either. HMM does not support architectures that
+	 * allow write without read.
 	 */
-	if (!(vma->vm_flags & VM_READ)) {
+	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
+	    !(vma->vm_flags & VM_READ)) {
 		bool fault, write_fault;
 
 		/*
@@ -629,7 +626,7 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end,
 		if (fault || write_fault)
 			return -EFAULT;
 
-		hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 		hmm_vma_walk->last = end;
 
 		/* Skip this vma and continue processing the next vma. */
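
For reference, after this patch the whole test reads roughly as follows.
This is a reconstruction stitched together from the two hunks above plus
their unchanged context, not an authoritative listing of this tree:

	static int hmm_vma_walk_test(unsigned long start, unsigned long end,
				     struct mm_walk *walk)
	{
		struct hmm_vma_walk *hmm_vma_walk = walk->private;
		struct hmm_range *range = hmm_vma_walk->range;
		struct vm_area_struct *vma = walk->vma;

		/*
		 * Skip vma ranges that don't have struct page backing them or map I/O
		 * devices directly.
		 *
		 * If the vma does not allow read access, then assume that it does not
		 * allow write access either. HMM does not support architectures that
		 * allow write without read.
		 */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
		    !(vma->vm_flags & VM_READ)) {
			bool fault, write_fault;

			/* Check to see if a fault is requested for any page in the range. */
			hmm_range_need_fault(hmm_vma_walk,
					     range->pfns + ((start - range->start) >> PAGE_SHIFT),
					     (end - start) >> PAGE_SHIFT,
					     0, &fault, &write_fault);
			if (fault || write_fault)
				return -EFAULT;

			/* Not required by the caller: mark the range as a hard error. */
			hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
			hmm_vma_walk->last = end;

			/* Skip this vma and continue processing the next vma. */
			return 1;
		}

		return 0;
	}

Returning 1 from a test_walk callback tells walk_page_range() to skip the
rest of this vma, while 0 lets the normal pmd/pte walkers run over it.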