KVM: nVMX: check host CR3 on vmentry and vmexit

This commit adds the missing host CR3 checks. Before entering guest mode, the
host CR3 value (vmcs12->host_cr3) is checked for reserved bits above the guest's
physical-address width. On returning from guest mode, nested_vmx_load_cr3 is
called to set the new CR3 value and to check and load the PDPTRs.
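
Below is a minimal, standalone sketch of the reserved-bit check that the diff
introduces as nested_cr3_valid(). The cr3_valid() helper and its maxphyaddr
parameter are illustrative stand-ins for the real code, where the width comes
from cpuid_maxphyaddr(vcpu), i.e. the guest's physical-address width reported
through CPUID:

    #include <stdio.h>
    #include <stdbool.h>

    /*
     * Mirrors the mask computation in nested_cr3_valid(): any CR3 bit at or
     * above MAXPHYADDR is reserved and must be zero.
     */
    static bool cr3_valid(unsigned long long cr3, unsigned int maxphyaddr)
    {
            unsigned long long invalid_mask = (~0ULL) << maxphyaddr;

            return (cr3 & invalid_mask) == 0;
    }

    int main(void)
    {
            /* MAXPHYADDR = 36: bit 40 falls in the reserved range. */
            printf("%d\n", cr3_valid(0x1000ULL, 36));   /* 1 (valid) */
            printf("%d\n", cr3_valid(1ULL << 40, 36));  /* 0 (reserved bit set) */
            return 0;
    }

As the diff shows, the same helper serves both call sites: validating
vmcs12->host_cr3 before entry in nested_vmx_run() and the existing guest CR3
check inside nested_vmx_load_cr3().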

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Author:    Ladi Prosek <lprosek@redhat.com>
Date:      2016-11-30 16:03:11 +01:00
Committer: Paolo Bonzini
Parent:    9ed38ffad4
Commit:    1dc35dacc1

2 changed files with 19 additions and 7 deletions

diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
@@ -133,6 +133,7 @@
 	{ EXIT_REASON_XRSTORS,               "XRSTORS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
@@ -9968,6 +9968,14 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return 0;
 }
 
+static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	unsigned long invalid_mask;
+
+	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+	return (val & invalid_mask) == 0;
+}
+
 /*
  * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
  * emulating VM entry into a guest with EPT enabled.
@@ -9977,11 +9985,8 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
 			       unsigned long *entry_failure_code)
 {
-	unsigned long invalid_mask;
-
 	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
-		invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
-		if (cr3 & invalid_mask) {
+		if (!nested_cr3_valid(vcpu, cr3)) {
 			*entry_failure_code = ENTRY_FAIL_DEFAULT;
 			return 1;
 		}
@@ -10452,7 +10457,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	}
 
 	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
-	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) {
+	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
+	    !nested_cr3_valid(vcpu, vmcs12->host_cr3)) {
 		nested_vmx_failValid(vcpu,
 				     VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 		goto out;
@@ -10879,6 +10885,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 				   struct vmcs12 *vmcs12)
 {
 	struct kvm_segment seg;
+	unsigned long entry_failure_code;
 
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
 		vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -10916,8 +10923,12 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
 	nested_ept_uninit_mmu_context(vcpu);
 
-	kvm_set_cr3(vcpu, vmcs12->host_cr3);
-	kvm_mmu_reset_context(vcpu);
+	/*
+	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
+	 * couldn't have changed.
+	 */
+	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
 
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;