@@ -2167,8 +2167,8 @@ static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
 }
 
 struct mmu_page_path {
-	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
-	unsigned int idx[PT64_ROOT_LEVEL];
+	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
+	unsigned int idx[PT64_ROOT_MAX_LEVEL];
 };
 
 #define for_each_sp(pvec, sp, parents, i)			\
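
[Editor's note: the arrays in mmu_page_path are sized by PT64_ROOT_MAX_LEVEL rather than PT64_ROOT_4LEVEL so their bounds track the deepest root the MMU could ever use, not the current default. For context, a sketch of the definitions this rename relies on, as they would appear in arch/x86/kvm/mmu.h in the same series; the exact surrounding lines are an assumption:]

    #define PT64_ROOT_4LEVEL    4
    #define PT32_ROOT_LEVEL     2
    #define PT32E_ROOT_LEVEL    3

    /* Upper bound on shadow paging depth; buffers sized by this stay
     * valid if deeper (e.g. 5-level) roots are introduced later. */
    #define PT64_ROOT_MAX_LEVEL PT64_ROOT_4LEVEL
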
@@ -2383,8 +2383,8 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
 	iterator->level = vcpu->arch.mmu.shadow_root_level;
 
-	if (iterator->level == PT64_ROOT_LEVEL &&
-	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
+	if (iterator->level == PT64_ROOT_4LEVEL &&
+	    vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
 	    !vcpu->arch.mmu.direct_map)
 		--iterator->level;
 
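
[Editor's note: to make the condition above concrete, here is a minimal standalone sketch, with hypothetical names and not kernel code, of how the walk's starting level is chosen when a 4-level shadow table maps a shallower guest table indirectly:]

    #include <stdbool.h>

    #define PT64_ROOT_4LEVEL 4

    /* Mirrors the logic in shadow_walk_init(): an indirect guest with
     * fewer paging levels than the shadow root (e.g. a PAE guest under
     * a 4-level shadow) starts its walk one level down, at the PDPTEs. */
    static int walk_start_level(int shadow_root_level,
                                int guest_root_level, bool direct_map)
    {
            int level = shadow_root_level;

            if (level == PT64_ROOT_4LEVEL &&
                guest_root_level < PT64_ROOT_4LEVEL && !direct_map)
                    --level;
            return level;
    }
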
@@ -3322,8 +3322,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
-	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL &&
+	    (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL ||
 	     vcpu->arch.mmu.direct_map)) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -3375,13 +3375,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	struct kvm_mmu_page *sp;
 	unsigned i;
 
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
 		if(make_mmu_pages_available(vcpu) < 0) {
 			spin_unlock(&vcpu->kvm->mmu_lock);
 			return 1;
 		}
-		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
+		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_4LEVEL, 1, ACC_ALL);
 		++sp->root_count;
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
@@ -3425,7 +3425,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * Do we shadow a long mode page table? If so we need to
 	 * write-protect the guests page table root.
 	 */
-	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
 		MMU_WARN_ON(VALID_PAGE(root));
@@ -3435,7 +3435,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			spin_unlock(&vcpu->kvm->mmu_lock);
 			return 1;
 		}
-		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
+		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_4LEVEL,
 				      0, ACC_ALL);
 		root = __pa(sp->spt);
 		++sp->root_count;
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK;
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
 	for (i = 0; i < 4; ++i) {
@@ -3486,7 +3486,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * If we shadow a 32 bit page table with a long mode page
 	 * table we enter this path.
 	 */
-	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
 		if (vcpu->arch.mmu.lm_root == NULL) {
 			/*
 			 * The additional page necessary for this is only
@@ -3531,7 +3531,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 
 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
-	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
+	if (vcpu->arch.mmu.root_level == PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);
@@ -3614,7 +3614,7 @@ static bool
 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
+	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
 	int root, leaf;
 	bool reserved = false;
 
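
[Editor's note: sptes[] records one entry per level of the walk, indexed level - 1, which is why it must be dimensioned by the maximum possible root level. A simplified, hypothetical illustration of that indexing, not the kernel function itself:]

    #include <stdint.h>

    #define PT64_ROOT_MAX_LEVEL 4

    /* Like walk_shadow_page_get_mmio_spte(), store one entry per paging
     * level; levels are 1-based, hence the level - 1 index. Sizing by
     * PT64_ROOT_MAX_LEVEL keeps every write in bounds for any root depth. */
    static void record_walk(uint64_t sptes[PT64_ROOT_MAX_LEVEL],
                            int root_level)
    {
            for (int level = root_level; level >= 1; --level)
                    sptes[level - 1] = 0;   /* placeholder for the SPTE */
    }
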
@@ -4057,7 +4057,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 		rsvd_check->rsvd_bits_mask[1][0] =
 			rsvd_check->rsvd_bits_mask[0][0];
 		break;
-	case PT64_ROOT_LEVEL:
+	case PT64_ROOT_4LEVEL:
 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
 			rsvd_bits(maxphyaddr, 51);
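
[Editor's note: the 4-level case builds its masks from rsvd_bits(). As a reference, an equivalent re-derivation of that helper; the kernel's own version lives in arch/x86/kvm/mmu.h and may differ in edge-case handling:]

    #include <stdint.h>

    /* Mask with bits s..e (inclusive) set: rsvd_bits(7, 7) is bit 7, and
     * rsvd_bits(maxphyaddr, 51) covers the high reserved PTE range. */
    static inline uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }
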
@@ -4367,7 +4367,7 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
 static void paging64_init_context(struct kvm_vcpu *vcpu,
 				  struct kvm_mmu *context)
 {
-	paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
+	paging64_init_context_common(vcpu, context, PT64_ROOT_4LEVEL);
 }
 
 static void paging32_init_context(struct kvm_vcpu *vcpu,
@@ -4422,7 +4422,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 		context->root_level = 0;
 	} else if (is_long_mode(vcpu)) {
 		context->nx = is_nx(vcpu);
-		context->root_level = PT64_ROOT_LEVEL;
+		context->root_level = PT64_ROOT_4LEVEL;
 		reset_rsvds_bits_mask(vcpu, context);
 		context->gva_to_gpa = paging64_gva_to_gpa;
 	} else if (is_pae(vcpu)) {
@@ -4533,7 +4533,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
 	} else if (is_long_mode(vcpu)) {
 		g_context->nx = is_nx(vcpu);
-		g_context->root_level = PT64_ROOT_LEVEL;
+		g_context->root_level = PT64_ROOT_4LEVEL;
 		reset_rsvds_bits_mask(vcpu, g_context);
 		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
 	} else if (is_pae(vcpu)) {