KVM: arm64: Convert write-protect operation to generic page-table API

Convert stage2_wp_range() to call the kvm_pgtable_stage2_wrprotect()
function of the generic page-table code instead of walking the page-table
directly.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200911132529.19844-14-will@kernel.org
Quentin Perret 2020-09-11 14:25:21 +01:00 committed by Marc Zyngier
parent 73d49df2c3
commit cc38d61cac
1 changed file with 4 additions and 21 deletions
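
For context (not part of this diff): kvm_pgtable_stage2_wrprotect() is the
generic page-table helper added earlier in this series. Its declaration in
arch/arm64/include/asm/kvm_pgtable.h matches the callback type that
stage2_apply_range() expects:

	int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

It write-protects the stage-2 range [addr, addr + size) without performing
TLB invalidation, so callers such as kvm_mmu_wp_memory_region() remain
responsible for flushing the TLBs once the whole range has been processed.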

arch/arm64/kvm/mmu.c

@@ -71,6 +71,9 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
 	return ret;
 }
 
+#define stage2_apply_range_resched(kvm, addr, end, fn)			\
+	stage2_apply_range(kvm, addr, end, fn, true)
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
 	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -1302,27 +1305,7 @@ static void stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
 	struct kvm *kvm = mmu->kvm;
-	pgd_t *pgd;
-	phys_addr_t next;
-
-	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
-	do {
-		/*
-		 * Release kvm_mmu_lock periodically if the memory region is
-		 * large. Otherwise, we may see kernel panics with
-		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
-		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
-		 * will also starve other vCPUs. We have to also make sure
-		 * that the page tables are not freed while we released
-		 * the lock.
-		 */
-		cond_resched_lock(&kvm->mmu_lock);
-		if (!READ_ONCE(mmu->pgd))
-			break;
-		next = stage2_pgd_addr_end(kvm, addr, end);
-		if (stage2_pgd_present(kvm, *pgd))
-			stage2_wp_p4ds(mmu, pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
+	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
 }
 
 /**
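
For reference, a simplified sketch (not part of this commit) of how
stage2_apply_range() drives the callback. This follows the helper as
introduced earlier in the series, so minor details may differ:

	static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
				      phys_addr_t end,
				      int (*fn)(struct kvm_pgtable *, u64, u64),
				      bool resched)
	{
		int ret;
		u64 next;

		do {
			struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;

			/* The page table may be torn down concurrently. */
			if (!pgt)
				return -EINVAL;

			/* Process one top-level entry's worth at a time. */
			next = stage2_pgd_addr_end(kvm, addr, end);
			ret = fn(pgt, addr, next - addr);
			if (ret)
				break;

			/* Drop kvm->mmu_lock and reschedule if needed. */
			if (resched && next != end)
				cond_resched_lock(&kvm->mmu_lock);
		} while (addr = next, addr != end);

		return ret;
	}

Note how the cond_resched_lock() call and the READ_ONCE(mmu->pgd) check that
stage2_wp_range() used to open-code are centralised here: the !pgt test covers
the "page table freed while the lock was dropped" case, and rescheduling now
happens between chunks instead of on every loop iteration.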