
vfio/type1: Support faulting PFNMAP vmas

With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on
the range being faulted into the vma.  Add support to manually provide
that, in the same way as done on KVM with hva_to_pfn_remapped().

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Alex Williamson 2020-04-28 17:02:24 -06:00
parent b9bbe6ed63
commit 4131124222
1 changed file with 33 additions and 3 deletions
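
The commit message points at KVM's hva_to_pfn_remapped() as the model: if follow_pfn() fails because no PTE is present yet, fault the mapping in with fixup_user_fault() and look the PFN up again. A simplified sketch of that KVM pattern follows; it is paraphrased for illustration (the _sketch suffix marks it as hypothetical), not the verbatim virt/kvm/kvm_main.c source of this era.

/*
 * Simplified sketch of the fault-and-retry pattern KVM uses in
 * hva_to_pfn_remapped() -- paraphrased for illustration; see
 * virt/kvm/kvm_main.c for the real implementation.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int hva_to_pfn_remapped_sketch(struct vm_area_struct *vma,
                                      unsigned long addr, bool write_fault,
                                      unsigned long *p_pfn)
{
        bool unlocked = false;
        int r;

        r = follow_pfn(vma, addr, p_pfn);
        if (r) {
                /*
                 * get_user_pages() refuses VM_IO/VM_PFNMAP vmas and never
                 * invokes the fault handler, so fault the mapping in by
                 * hand, then retry the PFN lookup.
                 */
                r = fixup_user_fault(current, current->mm, addr,
                                     write_fault ? FAULT_FLAG_WRITE : 0,
                                     &unlocked);
                if (unlocked)
                        return -EAGAIN; /* mmap_sem was dropped; caller retries */
                if (r)
                        return r;

                r = follow_pfn(vma, addr, p_pfn);
        }

        return r;
}

The patch below transplants the same two-step lookup into vfio/type1, with FAULT_FLAG_REMOTE added because vfio operates on a possibly remote mm rather than current->mm.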


@@ -317,6 +317,32 @@ static int put_pfn(unsigned long pfn, int prot)
 	return 0;
 }
 
+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+			    unsigned long vaddr, unsigned long *pfn,
+			    bool write_fault)
+{
+	int ret;
+
+	ret = follow_pfn(vma, vaddr, pfn);
+	if (ret) {
+		bool unlocked = false;
+
+		ret = fixup_user_fault(NULL, mm, vaddr,
+				       FAULT_FLAG_REMOTE |
+				       (write_fault ? FAULT_FLAG_WRITE : 0),
+				       &unlocked);
+		if (unlocked)
+			return -EAGAIN;
+
+		if (ret)
+			return ret;
+
+		ret = follow_pfn(vma, vaddr, pfn);
+	}
+
+	return ret;
+}
+
 static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 			 int prot, unsigned long *pfn)
 {
@@ -339,12 +365,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 	vaddr = untagged_addr(vaddr);
 
+retry:
 	vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
 	if (vma && vma->vm_flags & VM_PFNMAP) {
-		if (!follow_pfn(vma, vaddr, pfn) &&
-		    is_invalid_reserved_pfn(*pfn))
-			ret = 0;
+		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+		if (ret == -EAGAIN)
+			goto retry;
+
+		if (!ret && !is_invalid_reserved_pfn(*pfn))
+			ret = -EFAULT;
 	}
 done:
 	up_read(&mm->mmap_sem);
 	return ret;
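
The -EAGAIN handling is the subtle part of the second hunk: fixup_user_fault() sets unlocked when it had to drop and reacquire mmap_sem while servicing the fault, which can invalidate the vma found earlier, so vaddr_get_pfn() must restart from find_vma_intersection(). A condensed, comment-annotated view of the resulting PFNMAP path (an illustrative excerpt of the hunks above, not compilable on its own; error paths and the regular pinned-page path are omitted):

/*
 * Condensed control flow of the patched vaddr_get_pfn() PFNMAP path,
 * annotated for illustration -- simplified from the hunks above.
 */
down_read(&mm->mmap_sem);
...
retry:
	vma = find_vma_intersection(mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		/* Try follow_pfn(); fault the mapping in if no PTE yet. */
		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
		if (ret == -EAGAIN)
			goto retry;	/* mmap_sem was dropped: vma may be stale */

		/*
		 * A PFNMAP range is expected to yield a reserved or invalid
		 * (no struct page) PFN; a normal page-backed PFN here means
		 * the mapping cannot be treated as device memory.
		 */
		if (!ret && !is_invalid_reserved_pfn(*pfn))
			ret = -EFAULT;
	}
done:
	up_read(&mm->mmap_sem);

Note also the change in error convention versus the replaced lines: the old code left ret at its default and only set it to 0 on success, while the new code takes follow_fault_pfn()'s return value and explicitly demotes a valid struct-page-backed PFN to -EFAULT.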