
KVM: x86: Modify struct kvm_nested_state to have explicit fields for data

Improve the KVM_{GET,SET}_NESTED_STATE structs by detailing the format
of VMX nested state data in a struct.

In order to avoid changing the ioctl values of
KVM_{GET,SET}_NESTED_STATE, there is a need to preserve
sizeof(struct kvm_nested_state). This is done by defining the data
struct as "data.vmx[0]". It was the most elegant way I found to
preserve struct size while still keeping struct readable and easy to
maintain. It does have a misfortunate side-effect that now it has to be
accessed as "data.vmx[0]" rather than just "data.vmx".

Because we are already modifying these structs, I also modified the
following:
* Define the "format" field values as macros.
* Rename vmcs_pa to vmcs12_pa for better readability.

Signed-off-by: Liran Alon <liran.alon@oracle.com>
[Remove SVM stubs, add KVM_STATE_NESTED_VMX_VMCS12_SIZE. - Paolo]
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Liran Alon 2019-06-16 15:03:10 +03:00 committed by Paolo Bonzini
parent 76e3bcdb61
commit 6ca00dfafd
6 changed files with 122 additions and 85 deletions

Documentation/virtual/kvm/api.txt

@@ -3857,43 +3857,59 @@ Type: vcpu ioctl
 Parameters: struct kvm_nested_state (in/out)
 Returns: 0 on success, -1 on error
 
 Errors:
-  E2BIG:     the total state size (including the fixed-size part of struct
-             kvm_nested_state) exceeds the value of 'size' specified by
+  E2BIG:     the total state size exceeds the value of 'size' specified by
              the user; the size required will be written into size.
 
 struct kvm_nested_state {
 	__u16 flags;
 	__u16 format;
 	__u32 size;
+
 	union {
-		struct kvm_vmx_nested_state vmx;
-		struct kvm_svm_nested_state svm;
+		struct kvm_vmx_nested_state_hdr vmx;
+		struct kvm_svm_nested_state_hdr svm;
+
+		/* Pad the header to 128 bytes. */
 		__u8 pad[120];
-	};
-	__u8 data[0];
+	} hdr;
+
+	union {
+		struct kvm_vmx_nested_state_data vmx[0];
+		struct kvm_svm_nested_state_data svm[0];
+	} data;
 };
 
 #define KVM_STATE_NESTED_GUEST_MODE		0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING		0x00000002
+#define KVM_STATE_NESTED_EVMCS			0x00000004
 
-#define KVM_STATE_NESTED_SMM_GUEST_MODE		0x00000001
-#define KVM_STATE_NESTED_SMM_VMXON		0x00000002
+#define KVM_STATE_NESTED_FORMAT_VMX		0
+#define KVM_STATE_NESTED_FORMAT_SVM		1
 
-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE		0x1000
+
+#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE	0x00000001
+#define KVM_STATE_NESTED_VMX_SMM_VMXON		0x00000002
+
+struct kvm_vmx_nested_state_hdr {
 	__u64 vmxon_pa;
-	__u64 vmcs_pa;
+	__u64 vmcs12_pa;
 
 	struct {
 		__u16 flags;
 	} smm;
 };
 
+struct kvm_vmx_nested_state_data {
+	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
 This ioctl copies the vcpu's nested virtualization state from the kernel to
 userspace.
 
-The maximum size of the state, including the fixed-size part of struct
-kvm_nested_state, can be retrieved by passing KVM_CAP_NESTED_STATE to
-the KVM_CHECK_EXTENSION ioctl().
+The maximum size of the state can be retrieved by passing KVM_CAP_NESTED_STATE
+to the KVM_CHECK_EXTENSION ioctl().
 
 4.115 KVM_SET_NESTED_STATE

@@ -3903,8 +3919,8 @@ Type: vcpu ioctl
 Parameters: struct kvm_nested_state (in)
 Returns: 0 on success, -1 on error
 
-This copies the vcpu's kvm_nested_state struct from userspace to the kernel.  For
-the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
+This copies the vcpu's kvm_nested_state struct from userspace to the kernel.
+For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
 
 4.116 KVM_(UN)REGISTER_COALESCED_MMIO
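
A hedged illustration of the documented flow (hypothetical userspace
code; kvm_fd is assumed to be an open /dev/kvm system descriptor and
vcpu_fd a vCPU descriptor): probe the maximum state size with
KVM_CHECK_EXTENSION, then fetch the state and reach the VMX payload
through the new data.vmx[0] member.

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_nested_state *get_nested_state(int kvm_fd, int vcpu_fd)
	{
		/* KVM_CAP_NESTED_STATE reports the maximum state size. */
		int max_sz = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
		struct kvm_nested_state *state;

		if (max_sz < (int)sizeof(*state))
			return NULL;		/* nested state not supported */

		state = calloc(1, max_sz);
		if (!state)
			return NULL;

		state->size = max_sz;
		if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
			free(state);
			return NULL;
		}
		/* With format == KVM_STATE_NESTED_FORMAT_VMX, the vmcs12 bytes
		 * are now reached as state->data.vmx[0].vmcs12 rather than
		 * through the old untyped state->data array. */
		return state;
	}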

arch/x86/include/uapi/asm/kvm.h

@@ -383,6 +383,9 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	(1 << 2)
 #define KVM_X86_QUIRK_OUT_7E_INC_RIP	(1 << 3)
 
+#define KVM_STATE_NESTED_FORMAT_VMX	0
+#define KVM_STATE_NESTED_FORMAT_SVM	1	/* unused */
+
 #define KVM_STATE_NESTED_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING	0x00000002
 #define KVM_STATE_NESTED_EVMCS		0x00000004

@@ -390,9 +393,16 @@ struct kvm_sync_regs {
 #define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON	0x00000002
 
-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE	0x1000
+
+struct kvm_vmx_nested_state_data {
+	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
+struct kvm_vmx_nested_state_hdr {
 	__u64 vmxon_pa;
-	__u64 vmcs_pa;
+	__u64 vmcs12_pa;
 
 	struct {
 		__u16 flags;

@@ -401,24 +411,25 @@ struct kvm_vmx_nested_state {
 /* for KVM_CAP_NESTED_STATE */
 struct kvm_nested_state {
-	/* KVM_STATE_* flags */
 	__u16 flags;
-
-	/* 0 for VMX, 1 for SVM.  */
 	__u16 format;
-
-	/* 128 for SVM, 128 + VMCS size for VMX.  */
 	__u32 size;
 
 	union {
-		/* VMXON, VMCS */
-		struct kvm_vmx_nested_state vmx;
+		struct kvm_vmx_nested_state_hdr vmx;
 
 		/* Pad the header to 128 bytes. */
 		__u8 pad[120];
-	};
+	} hdr;
 
-	__u8 data[0];
+	/*
+	 * Define data region as 0 bytes to preserve backwards-compatability
+	 * to old definition of kvm_nested_state in order to avoid changing
+	 * KVM_{GET,PUT}_NESTED_STATE ioctl values.
+	 */
+	union {
+		struct kvm_vmx_nested_state_data vmx[0];
+	} data;
 };
 
 #endif /* _ASM_X86_KVM_H */
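
One consequence of the padded hdr union, shown as a hypothetical
consumer-side compile-time check (not part of the patch): the UAPI
struct must stay at its pre-patch size of 128 bytes, i.e. 8 bytes of
flags/format/size plus the 120-byte padded header, with the zero-length
data union adding nothing.

	#include <linux/kvm.h>

	/* 8 bytes of flags/format/size + 120-byte padded header union. */
	_Static_assert(sizeof(struct kvm_nested_state) == 128,
		       "kvm_nested_state header size is part of the ABI");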

arch/x86/kvm/vmx/nested.c

@@ -5226,14 +5226,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	struct vmcs12 *vmcs12;
 	struct kvm_nested_state kvm_state = {
 		.flags = 0,
-		.format = 0,
+		.format = KVM_STATE_NESTED_FORMAT_VMX,
 		.size = sizeof(kvm_state),
-		.vmx.vmxon_pa = -1ull,
-		.vmx.vmcs_pa = -1ull,
+		.hdr.vmx.vmxon_pa = -1ull,
+		.hdr.vmx.vmcs12_pa = -1ull,
 	};
+	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+		&user_kvm_nested_state->data.vmx[0];
 
 	if (!vcpu)
-		return kvm_state.size + 2 * VMCS12_SIZE;
+		return kvm_state.size + sizeof(*user_vmx_nested_state);
 
 	vmx = to_vmx(vcpu);
 	vmcs12 = get_vmcs12(vcpu);

@@ -5243,23 +5245,23 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	if (nested_vmx_allowed(vcpu) &&
 	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
-		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
-		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
 
 		if (vmx_has_valid_vmcs12(vcpu)) {
-			kvm_state.size += VMCS12_SIZE;
+			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 
 			if (is_guest_mode(vcpu) &&
 			    nested_cpu_has_shadow_vmcs(vmcs12) &&
 			    vmcs12->vmcs_link_pointer != -1ull)
-				kvm_state.size += VMCS12_SIZE;
+				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
 		}
 
 		if (vmx->nested.smm.vmxon)
-			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
 
 		if (vmx->nested.smm.guest_mode)
-			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
 
 		if (is_guest_mode(vcpu)) {
 			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

@@ -5294,16 +5296,19 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 			copy_shadow_to_vmcs12(vmx);
 	}
 
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
+
 	/*
 	 * Copy over the full allocated size of vmcs12 rather than just the size
 	 * of the struct.
 	 */
-	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
+	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
 		return -EFAULT;
 
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {
-		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
 				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}

@@ -5331,33 +5336,35 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12;
 	u32 exit_qual;
+	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+		&user_kvm_nested_state->data.vmx[0];
 	int ret;
 
-	if (kvm_state->format != 0)
+	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
 		return -EINVAL;
 
 	if (!nested_vmx_allowed(vcpu))
-		return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
+		return kvm_state->hdr.vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
 
-	if (kvm_state->vmx.vmxon_pa == -1ull) {
-		if (kvm_state->vmx.smm.flags)
+	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
+		if (kvm_state->hdr.vmx.smm.flags)
 			return -EINVAL;
 
-		if (kvm_state->vmx.vmcs_pa != -1ull)
+		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
 			return -EINVAL;
 
 		vmx_leave_nested(vcpu);
 		return 0;
 	}
 
-	if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+	if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
 		return -EINVAL;
 
-	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
 	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
 		return -EINVAL;
 
-	if (kvm_state->vmx.smm.flags &
+	if (kvm_state->hdr.vmx.smm.flags &
 	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
 		return -EINVAL;

@@ -5366,21 +5373,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
 	 * must be zero.
 	 */
-	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags)
 		return -EINVAL;
 
-	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
-	    !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
 		return -EINVAL;
 
 	vmx_leave_nested(vcpu);
-	if (kvm_state->vmx.vmxon_pa == -1ull)
+	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
 		return 0;
 
 	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
 		nested_enable_evmcs(vcpu, NULL);
 
-	vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;

@@ -5389,12 +5396,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
 		return 0;
 
-	if (kvm_state->vmx.vmcs_pa != -1ull) {
-		if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
-		    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
+		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
+		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
 			return -EINVAL;
 
-		set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
 	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
 		/*
 		 * Sync eVMCS upon entry as we may not have

@@ -5405,16 +5412,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 			return -EINVAL;
 	}
 
-	if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
 		vmx->nested.smm.vmxon = true;
 		vmx->nested.vmxon = false;
 
-		if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
 			vmx->nested.smm.guest_mode = true;
 	}
 
 	vmcs12 = get_vmcs12(vcpu);
-	if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
 		return -EFAULT;
 
 	if (vmcs12->hdr.revision_id != VMCS12_REVISION)

@@ -5431,12 +5438,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	    vmcs12->vmcs_link_pointer != -1ull) {
 		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
 
-		if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
+		if (kvm_state->size <
+		    sizeof(*kvm_state) +
+		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
 			goto error_guest_mode;
 
 		if (copy_from_user(shadow_vmcs12,
-				   user_kvm_nested_state->data + VMCS12_SIZE,
-				   sizeof(*vmcs12))) {
+				   user_vmx_nested_state->shadow_vmcs12,
+				   sizeof(*shadow_vmcs12))) {
 			ret = -EFAULT;
 			goto error_guest_mode;
 		}
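
Putting the constants together, the size reported by vmx_get_nested_state()
for a vCPU in guest mode with a valid shadow VMCS works out as:

	size = sizeof(struct kvm_nested_state)               /*  128 bytes */
	     + sizeof(user_vmx_nested_state->vmcs12)         /* 4096 bytes */
	     + sizeof(user_vmx_nested_state->shadow_vmcs12)  /* 4096 bytes */
	     = 8320 bytes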

arch/x86/kvm/vmx/vmcs12.h

@@ -201,9 +201,10 @@ struct __packed vmcs12 {
 /*
  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
  * and any VMCS region. Although only sizeof(struct vmcs12) are used by the
- * current implementation, 4K are reserved to avoid future complications.
+ * current implementation, 4K are reserved to avoid future complications and
+ * to preserve userspace ABI.
  */
-#define VMCS12_SIZE		0x1000
+#define VMCS12_SIZE		KVM_STATE_NESTED_VMX_VMCS_SIZE
 
 /*
  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any

tools/arch/x86/include/uapi/asm/kvm.h

@@ -392,7 +392,7 @@ struct kvm_sync_regs {
 struct kvm_vmx_nested_state {
 	__u64 vmxon_pa;
-	__u64 vmcs_pa;
+	__u64 vmcs12_pa;
 
 	struct {
 		__u16 flags;

tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c

@@ -75,7 +75,7 @@ void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
 			       u32 vmcs12_revision)
 {
 	/* Set revision_id in vmcs12 to vmcs12_revision. */
-	memcpy(state->data, &vmcs12_revision, sizeof(u32));
+	memcpy(&state->data, &vmcs12_revision, sizeof(u32));
 }
 
 void set_default_state(struct kvm_nested_state *state)

@@ -95,9 +95,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
 		       KVM_STATE_NESTED_EVMCS;
 	state->format = 0;
 	state->size = size;
-	state->vmx.vmxon_pa = 0x1000;
-	state->vmx.vmcs_pa = 0x2000;
-	state->vmx.smm.flags = 0;
+	state->hdr.vmx.vmxon_pa = 0x1000;
+	state->hdr.vmx.vmcs12_pa = 0x2000;
+	state->hdr.vmx.smm.flags = 0;
 	set_revision_id_for_vmcs12(state, VMCS12_REVISION);
 }

@@ -126,7 +126,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	 * is set to -1ull.
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmxon_pa = -1ull;
 	test_nested_state(vm, state);
 
 	/* Enable VMX in the guest CPUID. */

@@ -134,14 +134,14 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.smm.flags = 1;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.smm.flags = 1;
 	test_nested_state_expect_einval(vm, state);
 
 	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.vmcs_pa = 0;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmcs12_pa = 0;
 	test_nested_state_expect_einval(vm, state);
 
 	/*

@@ -149,13 +149,13 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	 * setting the nested state.
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.vmcs_pa = -1ull;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmcs12_pa = -1ull;
 	test_nested_state(vm, state);
 
 	/* It is invalid to have vmxon_pa set to a non-page aligned address. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = 1;
+	state->hdr.vmx.vmxon_pa = 1;
 	test_nested_state_expect_einval(vm, state);
 
 	/*

@@ -165,7 +165,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	set_default_vmx_state(state, state_sz);
 	state->flags = KVM_STATE_NESTED_GUEST_MODE |
 		      KVM_STATE_NESTED_RUN_PENDING;
-	state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
 	test_nested_state_expect_einval(vm, state);
 
 	/*

@@ -174,14 +174,14 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	 * KVM_STATE_NESTED_SMM_VMXON
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
+	state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
 				KVM_STATE_NESTED_SMM_VMXON);
 	test_nested_state_expect_einval(vm, state);
 
 	/* Outside SMM, SMM flags must be zero. */
 	set_default_vmx_state(state, state_sz);
 	state->flags = 0;
-	state->vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
 	test_nested_state_expect_einval(vm, state);
 
 	/* Size must be large enough to fit kvm_nested_state and vmcs12. */

@@ -191,8 +191,8 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	/* vmxon_pa cannot be the same address as vmcs_pa. */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = 0;
-	state->vmx.vmcs_pa = 0;
+	state->hdr.vmx.vmxon_pa = 0;
+	state->hdr.vmx.vmcs12_pa = 0;
 	test_nested_state_expect_einval(vm, state);
 
 	/* The revision id for vmcs12 must be VMCS12_REVISION. */

@@ -205,16 +205,16 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	 * it again.
 	 */
 	set_default_vmx_state(state, state_sz);
-	state->vmx.vmxon_pa = -1ull;
-	state->vmx.vmcs_pa = -1ull;
+	state->hdr.vmx.vmxon_pa = -1ull;
+	state->hdr.vmx.vmcs12_pa = -1ull;
 	state->flags = 0;
 	test_nested_state(vm, state);
 	vcpu_nested_state_get(vm, VCPU_ID, state);
 	TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
 		    "Size must be between %d and %d. The size returned was %d.",
 		    sizeof(*state), state_sz, state->size);
-	TEST_ASSERT(state->vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
-	TEST_ASSERT(state->vmx.vmcs_pa == -1ull, "vmcs_pa must be -1ull.");
+	TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
+	TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
 
 	free(state);
 }