
Merge branch 'kvm/next' into next

* kvm/next:
  virt/vgic: Increase number of DeviceIDs to 17
  arm/arm64: KVM: drop qman mmio cacheable mapping hack
  arm/arm64 KVM: allow specifying s2 prot bits when mapping i/o
  arm64: KVM: support flushing device memory
5.4-rM2-2.2.x-imx-squashed
Dong Aisheng 2019-12-02 18:02:32 +08:00
commit 89c08f49ef
5 changed files with 74 additions and 10 deletions
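Taken together, the mmu.c changes below thread a stage-2 pgprot_t through kvm_phys_addr_ioremap() and derive it from the stage-1 attributes of the PTE backing the userspace mapping, instead of hard-coding PAGE_S2_DEVICE. A minimal sketch of the resulting call pattern, condensed from the kvm_arch_prepare_memory_region() hunk further down (the wrapper name map_one_io_region is made up for illustration; the helpers are the ones this merge adds):

static int map_one_io_region(struct kvm *kvm,
                             struct kvm_userspace_memory_region *mem,
                             gpa_t gpa, phys_addr_t pa,
                             unsigned long size, bool writable)
{
        pte_t *pte;
        spinlock_t *ptl;
        pgprot_t prot;

        /* Look up the stage-1 PTE that backs the userspace mapping... */
        pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
        /* ...translate its MT_* attribute index into a stage-2 pgprot... */
        prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
        pte_unmap_unlock(pte, ptl);

        /* ...and install the stage-2 I/O mapping with those attributes. */
        return kvm_phys_addr_ioremap(kvm, gpa, pa, size, writable, prot);
}

Callers that genuinely map device MMIO, such as the vgic-v2 path in the last file, keep passing PAGE_S2_DEVICE explicitly.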

View File

@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-                          phys_addr_t pa, unsigned long size, bool writable);
+                          phys_addr_t pa, unsigned long size, bool writable,
+                          pgprot_t prot);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

View File

@@ -156,7 +156,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-                          phys_addr_t pa, unsigned long size, bool writable);
+                          phys_addr_t pa, unsigned long size, bool writable,
+                          pgprot_t prot);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -341,8 +341,16 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
         if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
-                struct page *page = pte_page(pte);
-                kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+                if (pfn_valid(pte_pfn(pte))) {
+                        struct page *page = pte_page(pte);
+                        kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+                } else {
+                        void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT,
+                                                            PAGE_SIZE);
+
+                        kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+                        iounmap(va);
+                }
         }
 }
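The arm64 hunk above is needed because not every pfn a guest maps is backed by a struct page: pfn_valid() separates ordinary RAM, which can be flushed through its linear-map address from page_address(), from device or reserved memory, which first needs a temporary kernel alias. A condensed sketch of the same pattern as a standalone helper (ioremap_cache_ns() is a helper specific to this vendor tree, and the name flush_guest_pfn_to_poc is made up for illustration):

static void flush_guest_pfn_to_poc(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn)) {
                /* RAM: flush through the existing cacheable linear-map alias. */
                kvm_flush_dcache_to_poc(page_address(pfn_to_page(pfn)), PAGE_SIZE);
        } else {
                /* No struct page: map a temporary cacheable alias, flush it, drop it. */
                void __iomem *va = ioremap_cache_ns(pfn << PAGE_SHIFT, PAGE_SIZE);

                kvm_flush_dcache_to_poc(va, PAGE_SIZE);
                iounmap(va);
        }
}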

View File

@@ -1330,9 +1330,11 @@ static int stage2_pudp_test_and_clear_young(pud_t *pud)
  * @guest_ipa: The IPA at which to insert the mapping
  * @pa: The physical address of the device
  * @size: The size of the mapping
+ * @prot: S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-                          phys_addr_t pa, unsigned long size, bool writable)
+                          phys_addr_t pa, unsigned long size, bool writable,
+                          pgprot_t prot)
 {
         phys_addr_t addr, end;
         int ret = 0;
@@ -1343,7 +1345,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
         pfn = __phys_to_pfn(pa);
 
         for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-                pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
+                pte_t pte = kvm_pfn_pte(pfn, prot);
 
                 if (writable)
                         pte = kvm_s2pte_mkwrite(pte);
@@ -1368,6 +1370,30 @@ out:
         return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+        switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+        case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+        case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+        case PTE_ATTRINDX(MT_DEVICE_GRE):
+                return PAGE_S2_DEVICE;
+        case PTE_ATTRINDX(MT_NORMAL_NC):
+        case PTE_ATTRINDX(MT_NORMAL):
+                return (pgprot_val(prot) & PTE_SHARED)
+                        ? PAGE_S2
+                        : PAGE_S2_NS;
+        }
+
+        return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+        return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
         kvm_pfn_t pfn = *pfnp;
@@ -1711,8 +1737,23 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * 3 levels, i.e, PMD is not folded.
          */
         if (vma_pagesize == PMD_SIZE ||
-            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
+            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) {
                 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+        } else {
+                if (!is_vm_hugetlb_page(vma)) {
+                        pte_t *pte;
+                        spinlock_t *ptl;
+                        pgprot_t prot;
+
+                        pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+                        prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+                        pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+                        if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+                                mem_type = PAGE_S2_NS;
+#endif
+                }
+        }
         up_read(&current->mm->mmap_sem);
 
         /* We need minimum second+third level pages */
@@ -1741,6 +1782,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (is_error_noslot_pfn(pfn))
                 return -EFAULT;
 
+#ifdef CONFIG_ARM64
+        if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+                flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+        } else
+#endif
         if (kvm_is_device_pfn(pfn)) {
                 mem_type = PAGE_S2_DEVICE;
                 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -2320,6 +2366,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                         gpa_t gpa = mem->guest_phys_addr +
                                     (vm_start - mem->userspace_addr);
                         phys_addr_t pa;
+                        pgprot_t prot;
+                        pte_t *pte;
+                        spinlock_t *ptl;
 
                         pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
                         pa += vm_start - vma->vm_start;
@@ -2330,9 +2379,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                 goto out;
                         }
 
+                        pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+                        prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+                        pte_unmap_unlock(pte, ptl);
+
                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                     vm_end - vm_start,
-                                                    writable);
+                                                    writable, prot);
                         if (ret)
                                 break;
                 }

View File

@@ -241,7 +241,7 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
 #define GIC_LPI_OFFSET          8192
 
 #define VITS_TYPER_IDBITS       16
-#define VITS_TYPER_DEVBITS      16
+#define VITS_TYPER_DEVBITS      17
 #define VITS_DTE_MAX_DEVID_OFFSET       (BIT(14) - 1)
 #define VITS_ITE_MAX_EVENTID_OFFSET     (BIT(16) - 1)
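Side note on the hunk above: VITS_TYPER_DEVBITS is the DeviceID width the emulated ITS advertises to the guest (GITS_TYPER.Devbits carries this value minus one), so raising it from 16 to 17 doubles the usable DeviceID space. A trivial standalone check of that arithmetic, illustrative only and not part of the patch:

#include <stdio.h>

int main(void)
{
        unsigned long old_ids = 1UL << 16;      /* DEVBITS = 16 ->  65536 DeviceIDs */
        unsigned long new_ids = 1UL << 17;      /* DEVBITS = 17 -> 131072 DeviceIDs */

        printf("DeviceIDs: %lu -> %lu\n", old_ids, new_ids);
        return 0;
}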

View File

@@ -341,7 +341,8 @@ int vgic_v2_map_resources(struct kvm *kvm)
         if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                 ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
                                             kvm_vgic_global_state.vcpu_base,
-                                            KVM_VGIC_V2_CPU_SIZE, true);
+                                            KVM_VGIC_V2_CPU_SIZE, true,
+                                            PAGE_S2_DEVICE);
                 if (ret) {
                         kvm_err("Unable to remap VGIC CPU to VCPU\n");
                         goto out;