diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b1fdb926bc6f..f2c50685bd3d 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1370,6 +1370,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1713,8 +1737,23 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * 3 levels, i.e, PMD is not folded.
 	 */
 	if (vma_pagesize == PMD_SIZE ||
-	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
+	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) {
 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+	} else {
+		if (!is_vm_hugetlb_page(vma)) {
+			pte_t *pte;
+			spinlock_t *ptl;
+			pgprot_t prot;
+
+			pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+			if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+				mem_type = PAGE_S2_NS;
+#endif
+		}
+	}
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
@@ -1743,6 +1782,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -2322,6 +2366,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -2332,9 +2379,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				goto out;
 			}
 
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable, PAGE_S2_DEVICE);
+						    writable, prot);
 			if (ret)
 				break;
 		}