kvm: nVMX: VMWRITE checks VMCS-link pointer before VMCS field

According to the SDM, a VMWRITE in VMX non-root operation with an
invalid VMCS-link pointer results in VMfailInvalid before the validity
of the VMCS field in the secondary source operand is checked.

For consistency, modify both handle_vmwrite and handle_vmread, even
though there was no problem with the latter.

Fixes: 6d894f498f ("KVM: nVMX: vmread/vmwrite: Use shadow vmcs12 if running L2")
Signed-off-by: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Peter Shier <pshier@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Jon Cargille <jcargill@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Jim Mattson 2019-12-06 15:46:35 -08:00 committed by Paolo Bonzini
parent 5e3d394fdd
commit dd2d6042b7
1 changed file with 25 additions and 34 deletions
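
The check ordering the commit message cites from the SDM can be pictured with a minimal sketch (plain user-space C, not kernel code; the names emulate_l2_vmwrite and field_is_supported are hypothetical, for illustration only): in VMX non-root operation, an invalid (-1ull) VMCS-link pointer must yield VMfailInvalid before the VMCS field in the secondary source operand is ever validated.

/*
 * Minimal sketch of the SDM-mandated check ordering for VMWRITE in
 * VMX non-root operation.  Hypothetical names; not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum vmx_result { VM_SUCCEED, VM_FAIL_INVALID, VM_FAIL_VALID };

/* Stand-in for the real VMCS field-encoding validity check. */
static bool field_is_supported(unsigned long field)
{
	return field != 0xdeadbeefUL;
}

static enum vmx_result emulate_l2_vmwrite(uint64_t vmcs_link_pointer,
					  unsigned long field)
{
	/*
	 * Step 1: an invalid (-1ull) VMCS-link pointer fails the
	 * instruction with VMfailInvalid before the field operand is
	 * examined at all.
	 */
	if (vmcs_link_pointer == ~(uint64_t)0)
		return VM_FAIL_INVALID;

	/*
	 * Step 2: only now may an unsupported field produce VMfailValid
	 * (VMXERR_UNSUPPORTED_VMCS_COMPONENT).
	 */
	if (!field_is_supported(field))
		return VM_FAIL_VALID;

	/* Step 3: the actual write into the shadow VMCS would happen here. */
	return VM_SUCCEED;
}

int main(void)
{
	/* Invalid link pointer wins even though the field is also bogus. */
	printf("%d\n", emulate_l2_vmwrite(~(uint64_t)0, 0xdeadbeefUL)); /* 1 = VM_FAIL_INVALID */
	/* With a valid link pointer, the bogus field is finally noticed. */
	printf("%d\n", emulate_l2_vmwrite(0x1000, 0xdeadbeefUL));       /* 2 = VM_FAIL_VALID */
	return 0;
}

The patch below achieves the same ordering in KVM by folding the link-pointer test into the same early VMfailInvalid check as the current_vmptr test, before any field decoding takes place.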


@@ -4753,32 +4753,28 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 {
 	unsigned long field;
 	u64 field_value;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	int len;
 	gva_t gva = 0;
-	struct vmcs12 *vmcs12;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	struct x86_exception e;
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMREAD sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
-	if (!is_guest_mode(vcpu))
-		vmcs12 = get_vmcs12(vcpu);
-	else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
-
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
@@ -4855,13 +4851,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	 */
 	u64 field_value = 0;
 	struct x86_exception e;
-	struct vmcs12 *vmcs12;
+	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
+						    : get_vmcs12(vcpu);
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (vmx->nested.current_vmptr == -1ull)
+	/*
+	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
+	 * any VMWRITE sets the ALU flags for VMfailInvalid.
+	 */
+	if (vmx->nested.current_vmptr == -1ull ||
+	    (is_guest_mode(vcpu) &&
+	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
 		return nested_vmx_failInvalid(vcpu);
 
 	if (vmx_instruction_info & (1u << 10))
@@ -4889,24 +4892,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		return nested_vmx_failValid(vcpu,
 					    VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
-	if (!is_guest_mode(vcpu)) {
-		vmcs12 = get_vmcs12(vcpu);
-
-		/*
-		 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
-		 * vmcs12, else we may crush a field or consume a stale value.
-		 */
-		if (!is_shadow_field_rw(field))
-			copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-	} else {
-		/*
-		 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
-		 * to shadowed-field sets the ALU flags for VMfailInvalid.
-		 */
-		if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
-			return nested_vmx_failInvalid(vcpu);
-		vmcs12 = get_shadow_vmcs12(vcpu);
-	}
+	/*
+	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
+	 * vmcs12, else we may crush a field or consume a stale value.
+	 */
+	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
+		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
 
 	offset = vmcs_field_to_offset(field);
 	if (offset < 0)