

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "ARM and x86 bugfixes of all kinds.

  The most visible one is that migrating a nested hypervisor has always
  been busted on Broadwell and newer processors, and that has finally
  been fixed"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (22 commits)
  KVM: x86: omit "impossible" pmu MSRs from MSR list
  KVM: nVMX: Fix consistency check on injected exception error code
  KVM: x86: omit absent pmu MSRs from MSR list
  selftests: kvm: Fix libkvm build error
  kvm: vmx: Limit guest PMCs to those supported on the host
  kvm: x86, powerpc: do not allow clearing largepages debugfs entry
  KVM: selftests: x86: clarify what is reported on KVM_GET_MSRS failure
  KVM: VMX: Set VMENTER_L1D_FLUSH_NOT_REQUIRED if !X86_BUG_L1TF
  selftests: kvm: add test for dirty logging inside nested guests
  KVM: x86: fix nested guest live migration with PML
  KVM: x86: assign two bits to track SPTE kinds
  KVM: x86: Expose XSAVEERPTR to the guest
  kvm: x86: Enumerate support for CLZERO instruction
  kvm: x86: Use AMD CPUID semantics for AMD vCPUs
  kvm: x86: Improve emulation of CPUID leaves 0BH and 1FH
  KVM: X86: Fix userspace set invalid CR4
  kvm: x86: Fix a spurious -E2BIG in __do_cpuid_func
  KVM: LAPIC: Loosen filter for adaptive tuning of lapic_timer_advance_ns
  KVM: arm/arm64: vgic: Use the appropriate TRACE_INCLUDE_PATH
  arm64: KVM: Kill hyp_alternate_select()
  ...
Linus Torvalds 2019-10-04 11:17:51 -07:00
commit b145b0eb20
23 changed files with 593 additions and 191 deletions


@ -47,30 +47,6 @@
#define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)
/**
* hyp_alternate_select - Generates patchable code sequences that are
* used to switch between two implementations of a function, depending
* on the availability of a feature.
*
* @fname: a symbol name that will be defined as a function returning a
* function pointer whose type will match @orig and @alt
* @orig: A pointer to the default function, as returned by @fname when
* @cond doesn't hold
* @alt: A pointer to the alternate function, as returned by @fname
* when @cond holds
* @cond: a CPU feature (as described in asm/cpufeature.h)
*/
#define hyp_alternate_select(fname, orig, alt, cond) \
typeof(orig) * __hyp_text fname(void) \
{ \
typeof(alt) *val = orig; \
asm volatile(ALTERNATIVE("nop \n", \
"mov %0, %1 \n", \
cond) \
: "+r" (val) : "r" (alt)); \
return val; \
}
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
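
The hyp_alternate_select() macro documented above generated a patched instruction sequence that returns one of two function pointers, forcing the awkward double-call syntax fname()() at every call site. The hunks below replace it with plain has_vhe()/cpus_have_const_cap() branches, which the kernel's capability machinery makes comparably cheap. A minimal userspace sketch of the two dispatch shapes (function and variable names here are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool has_vhe_cached = true;   /* stand-in for cpus_have_const_cap() */

static void tlb_switch_vhe(void)  { puts("VHE path"); }
static void tlb_switch_nvhe(void) { puts("nVHE path"); }

/* Old shape: a generated selector returns a function pointer, so every
 * call site invokes it twice: __tlb_switch_to_guest()(kvm, &cxt). */
static void (*tlb_switch_select(void))(void)
{
	return has_vhe_cached ? tlb_switch_vhe : tlb_switch_nvhe;
}

/* New shape: an ordinary wrapper with a branch on the cached capability;
 * call sites become a single, normal call. */
static void tlb_switch(void)
{
	if (has_vhe_cached)
		tlb_switch_vhe();
	else
		tlb_switch_nvhe();
}

int main(void)
{
	tlb_switch_select()();   /* old call syntax */
	tlb_switch();            /* new call syntax */
	return 0;
}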


@ -229,20 +229,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
}
}
static bool __hyp_text __true_value(void)
{
return true;
}
static bool __hyp_text __false_value(void)
{
return false;
}
static hyp_alternate_select(__check_arm_834220,
__false_value, __true_value,
ARM64_WORKAROUND_834220);
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
u64 par, tmp;
@ -298,7 +284,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
* resolve the IPA using the AT instruction.
*/
if (!(esr & ESR_ELx_S1PTW) &&
(__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
(cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
(esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
if (!__translate_far_to_hpfar(far, &hpfar))
return false;
} else {


@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
isb();
}
static hyp_alternate_select(__tlb_switch_to_guest,
__tlb_switch_to_guest_nvhe,
__tlb_switch_to_guest_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
if (has_vhe())
__tlb_switch_to_guest_vhe(kvm, cxt);
else
__tlb_switch_to_guest_nvhe(kvm, cxt);
}
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
write_sysreg(0, vttbr_el2);
}
static hyp_alternate_select(__tlb_switch_to_host,
__tlb_switch_to_host_nvhe,
__tlb_switch_to_host_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
if (has_vhe())
__tlb_switch_to_host_vhe(kvm, cxt);
else
__tlb_switch_to_host_nvhe(kvm, cxt);
}
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
__tlb_switch_to_guest()(kvm, &cxt);
__tlb_switch_to_guest(kvm, &cxt);
/*
* We could do so much better if we had the VA as well.
@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
__tlb_switch_to_host()(kvm, &cxt);
__tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
__tlb_switch_to_guest()(kvm, &cxt);
__tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
__tlb_switch_to_host()(kvm, &cxt);
__tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
__tlb_switch_to_guest()(kvm, &cxt);
__tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
__tlb_switch_to_host()(kvm, &cxt);
__tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_flush_vm_context(void)


@ -36,8 +36,8 @@
#include "book3s.h"
#include "trace.h"
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
/* #define EXIT_DEBUG */
@ -69,8 +69,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pthru_all", VCPU_STAT(pthru_all) },
{ "pthru_host", VCPU_STAT(pthru_host) },
{ "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
{ "largepages_2M", VM_STAT(num_2M_pages) },
{ "largepages_1G", VM_STAT(num_1G_pages) },
{ "largepages_2M", VM_STAT(num_2M_pages, .mode = 0444) },
{ "largepages_1G", VM_STAT(num_1G_pages, .mode = 0444) },
{ NULL }
};
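
The VM_STAT()/VCPU_STAT() macros become variadic so individual entries can override fields of struct kvm_stats_debugfs_item, here the new .mode member, via a designated initializer. A standalone sketch of the trick (struct and field names are simplified stand-ins; the ', ## __VA_ARGS__' comma-swallowing is a GNU extension, as in the kernel):

#include <stddef.h>
#include <stdio.h>

struct stats_item {
	const char *name;
	size_t offset;
	int kind;
	int mode;   /* 0 means "use the default (0644)" */
};

struct kvm_stub { long num_2M_pages; long other; };

#define VM_STAT(x, ...) offsetof(struct kvm_stub, x), 0, ## __VA_ARGS__

static struct stats_item items[] = {
	{ "other",         VM_STAT(other) },                      /* mode stays 0 */
	{ "largepages_2M", VM_STAT(num_2M_pages, .mode = 0444) }, /* read-only */
	{ NULL }
};

int main(void)
{
	struct stats_item *p;

	for (p = items; p->name; p++)
		printf("%-14s mode %04o\n", p->name, p->mode ? p->mode : 0644);
	return 0;
}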


@ -219,13 +219,6 @@ enum {
PFERR_WRITE_MASK | \
PFERR_PRESENT_MASK)
/*
* The mask used to denote special SPTEs, which can be either MMIO SPTEs or
* Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
* with the SVE bit in EPT PTEs.
*/
#define SPTE_SPECIAL_MASK (1ULL << 62)
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*


@ -485,6 +485,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
F(CLZERO) | F(XSAVEERPTR) |
F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);
@ -618,16 +619,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
*/
case 0x1f:
case 0xb: {
int i, level_type;
int i;
/* read more entries until level_type is zero */
for (i = 1; ; ++i) {
/*
* We filled in entry[0] for CPUID(EAX=<function>,
* ECX=00H) above. If its level type (ECX[15:8]) is
* zero, then the leaf is unimplemented, and we're
* done. Otherwise, continue to populate entries
* until the level type (ECX[15:8]) of the previously
* added entry is zero.
*/
for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
if (*nent >= maxnent)
goto out;
level_type = entry[i - 1].ecx & 0xff00;
if (!level_type)
break;
do_host_cpuid(&entry[i], function, i);
++*nent;
}
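
The rewritten loop above moves the level-type test into the for condition, so the *nent-versus-maxnent check runs only when another subleaf will actually be emitted. A compilable toy showing why the old shape could return a spurious -E2BIG when the output array was exactly full (subleaf values are made up):

#include <stdio.h>

struct entry { unsigned ecx; };

int main(void)
{
	/* Pretend do_host_cpuid() already ran: the level type lives in
	 * ECX[15:8], and the third subleaf is the terminator (type 0). */
	struct entry entry[8] = { { 0x0100 }, { 0x0200 }, { 0x0000 } };
	int nent = 1, maxnent = 3, i;

	for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
		if (nent >= maxnent)
			return -7;   /* -E2BIG; unreachable here, unlike the old loop */
		/* do_host_cpuid(&entry[i], function, i) would fill entry[i] now */
		++nent;
	}
	printf("populated %d entries\n", nent);
	return 0;
}

With the old loop, iteration i = 3 would have hit the maxnent check before discovering that entry[2] already carried the terminating level type, and failed even though nothing more needed to be stored.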
@ -969,53 +974,66 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
* If no match is found, check whether we exceed the vCPU's limit
* and return the content of the highest valid _standard_ leaf instead.
* This is to satisfy the CPUID specification.
* If the basic or extended CPUID leaf requested is higher than the
* maximum supported basic or extended leaf, respectively, then it is
* out of range.
*/
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
u32 function, u32 index)
static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
{
struct kvm_cpuid_entry2 *maxlevel;
struct kvm_cpuid_entry2 *max;
maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
if (!maxlevel || maxlevel->eax >= function)
return NULL;
if (function & 0x80000000) {
maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
if (!maxlevel)
return NULL;
}
return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
return max && function <= max->eax;
}
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
u32 *ecx, u32 *edx, bool check_limit)
{
u32 function = *eax, index = *ecx;
struct kvm_cpuid_entry2 *best;
bool entry_found = true;
struct kvm_cpuid_entry2 *entry;
struct kvm_cpuid_entry2 *max;
bool found;
best = kvm_find_cpuid_entry(vcpu, function, index);
if (!best) {
entry_found = false;
if (!check_limit)
goto out;
best = check_cpuid_limit(vcpu, function, index);
entry = kvm_find_cpuid_entry(vcpu, function, index);
found = entry;
/*
* Intel CPUID semantics treats any query for an out-of-range
* leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
* requested. AMD CPUID semantics returns all zeroes for any
* undefined leaf, whether or not the leaf is in range.
*/
if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
!cpuid_function_in_range(vcpu, function)) {
max = kvm_find_cpuid_entry(vcpu, 0, 0);
if (max) {
function = max->eax;
entry = kvm_find_cpuid_entry(vcpu, function, index);
}
}
out:
if (best) {
*eax = best->eax;
*ebx = best->ebx;
*ecx = best->ecx;
*edx = best->edx;
} else
if (entry) {
*eax = entry->eax;
*ebx = entry->ebx;
*ecx = entry->ecx;
*edx = entry->edx;
} else {
*eax = *ebx = *ecx = *edx = 0;
trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
return entry_found;
/*
* When leaf 0BH or 1FH is defined, CL is pass-through
* and EDX is always the x2APIC ID, even for undefined
* subleaves. Index 1 will exist iff the leaf is
* implemented, so we pass through CL iff leaf 1
* exists. EDX can be copied from any existing index.
*/
if (function == 0xb || function == 0x1f) {
entry = kvm_find_cpuid_entry(vcpu, function, 1);
if (entry) {
*ecx = index & 0xff;
*edx = entry->edx;
}
}
}
trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
return found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
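
A sketch of the two out-of-range behaviors kvm_cpuid() now distinguishes, with made-up limits (0x16 standing in for the vCPU's CPUID.0H:EAX, 0x8000001f for the extended maximum):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of cpuid_function_in_range(): basic leaves 0..0x16 and
 * extended leaves 0x80000000..0x8000001f are defined. */
static bool in_range(unsigned function)
{
	unsigned max = (function & 0x80000000) ? 0x8000001f : 0x16;
	return function <= max;
}

static void query(unsigned function, bool amd)
{
	if (in_range(function))
		printf("0x%08x: use the leaf as-is\n", function);
	else if (amd)
		printf("0x%08x: AMD semantics -> EAX=EBX=ECX=EDX=0\n", function);
	else
		printf("0x%08x: Intel semantics -> answer as if leaf 0x16 were asked\n",
		       function);
}

int main(void)
{
	query(0x40000100, false);  /* out of range on Intel: redirected */
	query(0x40000100, true);   /* out of range on AMD: zeroes */
	return 0;
}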


@ -66,9 +66,10 @@
#define X2APIC_BROADCAST 0xFFFFFFFFul
static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 5000
#define LAPIC_TIMER_ADVANCE_ADJUST_INIT 1000
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
@ -1504,8 +1505,8 @@ static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
}
if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_ADJUST_MAX))
timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
@ -2302,7 +2303,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
HRTIMER_MODE_ABS_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;
if (timer_advance_ns == -1) {
apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
lapic_timer_advance_dynamic = true;
} else {
apic->lapic_timer.timer_advance_ns = timer_advance_ns;
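
A simplified model of the adaptive clamp with the renamed constants: the measured timing error feeds the advance time in steps of 1/8, and an advance that grows past LAPIC_TIMER_ADVANCE_NS_MAX is reset to the initial value. This is a sketch; sign handling and the dynamic-enable check are omitted:

#include <stdio.h>

#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
#define LAPIC_TIMER_ADVANCE_NS_INIT     1000
#define LAPIC_TIMER_ADVANCE_NS_MAX      5000

static unsigned int adjust(unsigned int timer_advance_ns, long ns_late)
{
	timer_advance_ns += ns_late / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	if (timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX)
		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
	return timer_advance_ns;
}

int main(void)
{
	printf("%u\n", adjust(4800, 3000));   /* 5175 > 5000 -> reset to 1000 */
	return 0;
}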


@ -83,7 +83,17 @@ module_param(dbg, bool, 0644);
#define PTE_PREFETCH_NUM 8
#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
#define PT64_SECOND_AVAIL_BITS_SHIFT 54
/*
* The mask used to denote special SPTEs, which can be either MMIO SPTEs or
* Access Tracking SPTEs.
*/
#define SPTE_SPECIAL_MASK (3ULL << 52)
#define SPTE_AD_ENABLED_MASK (0ULL << 52)
#define SPTE_AD_DISABLED_MASK (1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK (3ULL << 52)
#define PT64_LEVEL_BITS 9
@ -219,12 +229,11 @@ static u64 __read_mostly shadow_present_mask;
static u64 __read_mostly shadow_me_mask;
/*
* SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
* Non-present SPTEs with shadow_acc_track_value set are in place for access
* tracking.
* SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
* shadow_acc_track_mask is the set of bits to be cleared in non-accessed
* pages.
*/
static u64 __read_mostly shadow_acc_track_mask;
static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
/*
* The mask/shift to use for saving the original R/X bits when marking the PTE
@ -304,7 +313,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
{
BUG_ON((u64)(unsigned)access_mask != access_mask);
BUG_ON((mmio_mask & mmio_value) != mmio_value);
shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
shadow_mmio_access_mask = access_mask;
}
@ -320,10 +329,27 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
return sp->role.ad_disabled;
}
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
/*
* When using the EPT page-modification log, the GPAs in the log
* would come from L2 rather than L1. Therefore, we need to rely
* on write protection to record dirty pages. This also bypasses
* PML, since writes now result in a vmexit.
*/
return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}
static inline bool spte_ad_enabled(u64 spte)
{
MMU_WARN_ON(is_mmio_spte(spte));
return !(spte & shadow_acc_track_value);
return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
}
static inline bool spte_ad_need_write_protect(u64 spte)
{
MMU_WARN_ON(is_mmio_spte(spte));
return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
}
static inline u64 spte_shadow_accessed_mask(u64 spte)
@ -461,7 +487,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
{
BUG_ON(!dirty_mask != !accessed_mask);
BUG_ON(!accessed_mask && !acc_track_mask);
BUG_ON(acc_track_mask & shadow_acc_track_value);
BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
@ -1589,16 +1615,16 @@ static bool spte_clear_dirty(u64 *sptep)
rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
MMU_WARN_ON(!spte_ad_enabled(spte));
spte &= ~shadow_dirty_mask;
return mmu_spte_update(sptep, spte);
}
static bool wrprot_ad_disabled_spte(u64 *sptep)
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{
bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
(unsigned long *)sptep);
if (was_writable)
if (was_writable && !spte_ad_enabled(*sptep))
kvm_set_pfn_dirty(spte_to_pfn(*sptep));
return was_writable;
@ -1617,10 +1643,10 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
if (spte_ad_enabled(*sptep))
flush |= spte_clear_dirty(sptep);
if (spte_ad_need_write_protect(*sptep))
flush |= spte_wrprot_for_clear_dirty(sptep);
else
flush |= wrprot_ad_disabled_spte(sptep);
flush |= spte_clear_dirty(sptep);
return flush;
}
@ -1631,6 +1657,11 @@ static bool spte_set_dirty(u64 *sptep)
rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
/*
* Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
* do not bother adding back write access to pages marked
* SPTE_AD_WRPROT_ONLY_MASK.
*/
spte |= shadow_dirty_mask;
return mmu_spte_update(sptep, spte);
@ -2622,7 +2653,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
shadow_user_mask | shadow_x_mask | shadow_me_mask;
if (sp_ad_disabled(sp))
spte |= shadow_acc_track_value;
spte |= SPTE_AD_DISABLED_MASK;
else
spte |= shadow_accessed_mask;
@ -2968,7 +2999,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
sp = page_header(__pa(sptep));
if (sp_ad_disabled(sp))
spte |= shadow_acc_track_value;
spte |= SPTE_AD_DISABLED_MASK;
else if (kvm_vcpu_ad_need_write_protect(vcpu))
spte |= SPTE_AD_WRPROT_ONLY_MASK;
/*
* For the EPT case, shadow_present_mask is 0 if hardware
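
With two bits at position 52, the special-SPTE space now encodes four kinds instead of two; SPTE_AD_WRPROT_ONLY_MASK marks pages that must be write-protected for dirty tracking because, per the comment above, nested PML would log L2 GPAs rather than L1 GPAs. A tiny standalone decoder for the four states:

#include <stdint.h>
#include <stdio.h>

#define SPTE_SPECIAL_MASK        (3ULL << 52)
#define SPTE_AD_ENABLED_MASK     (0ULL << 52)
#define SPTE_AD_DISABLED_MASK    (1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK           (3ULL << 52)

static const char *spte_kind(uint64_t spte)
{
	switch (spte & SPTE_SPECIAL_MASK) {
	case SPTE_AD_ENABLED_MASK:     return "A/D enabled";
	case SPTE_AD_DISABLED_MASK:    return "A/D disabled (access-tracked)";
	case SPTE_AD_WRPROT_ONLY_MASK: return "A/D disabled, write-protect for dirty log";
	default:                       return "MMIO";   /* SPTE_MMIO_MASK */
	}
}

int main(void)
{
	printf("%s\n", spte_kind(SPTE_AD_WRPROT_ONLY_MASK | 0x1000));
	return 0;
}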


@ -2610,7 +2610,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
/* VM-entry exception error code */
if (CC(has_error_code &&
vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)))
vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
return -EINVAL;
/* VM-entry interruption-info field: reserved bits */
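
The reserved-bits mask for the injected exception error code shrinks from bits 31:15 to 31:16, so a 16-bit error code with bit 15 set no longer fails the VM-entry consistency check. A compilable illustration of the one-bit difference (GENMASK32 is a userspace stand-in for the kernel's GENMASK):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) ((~0U >> (31 - (h))) & (~0U << (l)))

int main(void)
{
	uint32_t err = 0x8001;   /* an error code with bit 15 set */

	printf("old check rejects: %s\n",
	       (err & GENMASK32(31, 15)) ? "yes" : "no");  /* yes -> VM-entry fails */
	printf("new check rejects: %s\n",
	       (err & GENMASK32(31, 16)) ? "yes" : "no");  /* no  -> injection allowed */
	return 0;
}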


@ -262,6 +262,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct x86_pmu_capability x86_pmu;
struct kvm_cpuid_entry2 *entry;
union cpuid10_eax eax;
union cpuid10_edx edx;
@ -283,8 +284,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
if (!pmu->version)
return;
perf_get_x86_pmu_capability(&x86_pmu);
pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);
@ -294,7 +297,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
} else {
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
INTEL_PMC_MAX_FIXED);
x86_pmu.num_counters_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}
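
Guest PMC counts advertised through CPUID are now clamped by the host's actual PMU capability rather than only by the architectural maxima. A toy with made-up numbers:

#include <stdio.h>

#define INTEL_PMC_MAX_GENERIC 32
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int guest_cpuid_gp = 8;   /* what userspace put in the guest's CPUID */
	int host_gp        = 4;   /* what perf_get_x86_pmu_capability() reports */

	/* Before: clamped only by the architectural limit (32). */
	int before = min(guest_cpuid_gp, INTEL_PMC_MAX_GENERIC);
	/* After: also clamped by what the host can actually virtualize. */
	int after  = min(guest_cpuid_gp, host_gp);

	printf("nr_arch_gp_counters: before=%d after=%d\n", before, after);
	return 0;
}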


@ -209,6 +209,11 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
struct page *page;
unsigned int i;
if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
return 0;
}
if (!enable_ept) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
return 0;
@ -7995,12 +8000,10 @@ static int __init vmx_init(void)
* contain 'auto' which will be turned into the default 'cond'
* mitigation mode.
*/
if (boot_cpu_has(X86_BUG_L1TF)) {
r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
if (r) {
vmx_exit();
return r;
}
r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
if (r) {
vmx_exit();
return r;
}
#ifdef CONFIG_KEXEC_CORE
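
Moving the X86_BUG_L1TF check into vmx_setup_l1d_flush() means CPUs without the bug record an explicit VMENTER_L1D_FLUSH_NOT_REQUIRED instead of never running the setup at all. A condensed model of the new control flow (the enum values are stand-ins):

#include <stdbool.h>
#include <stdio.h>

enum flush_state { FLUSH_AUTO, FLUSH_NOT_REQUIRED, FLUSH_EPT_DISABLED, FLUSH_COND };

static enum flush_state l1tf_vmx_mitigation = FLUSH_AUTO;

static int setup_l1d_flush(bool cpu_has_l1tf_bug, bool enable_ept)
{
	if (!cpu_has_l1tf_bug) {
		/* new: recorded even on unaffected CPUs */
		l1tf_vmx_mitigation = FLUSH_NOT_REQUIRED;
		return 0;
	}
	if (!enable_ept) {
		l1tf_vmx_mitigation = FLUSH_EPT_DISABLED;
		return 0;
	}
	l1tf_vmx_mitigation = FLUSH_COND;
	return 0;
}

int main(void)
{
	setup_l1d_flush(false, true);
	printf("state=%d\n", l1tf_vmx_mitigation);   /* 1 == FLUSH_NOT_REQUIRED */
	return 0;
}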


@ -92,8 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@ -212,7 +212,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
{ "mmu_unsync", VM_STAT(mmu_unsync) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ "largepages", VM_STAT(lpages) },
{ "largepages", VM_STAT(lpages, .mode = 0444) },
{ "max_mmu_page_hash_collisions",
VM_STAT(max_mmu_page_hash_collisions) },
{ NULL }
@ -885,34 +885,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
if (cr4 & CR4_RESERVED_BITS)
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
return -EINVAL;
if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
return -EINVAL;
return 0;
}
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
if (kvm_valid_cr4(vcpu, cr4))
return 1;
if (is_long_mode(vcpu)) {
@ -1161,13 +1169,6 @@ static u32 msrs_to_save[] = {
MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
MSR_ARCH_PERFMON_PERFCTR0 + 18, MSR_ARCH_PERFMON_PERFCTR0 + 19,
MSR_ARCH_PERFMON_PERFCTR0 + 20, MSR_ARCH_PERFMON_PERFCTR0 + 21,
MSR_ARCH_PERFMON_PERFCTR0 + 22, MSR_ARCH_PERFMON_PERFCTR0 + 23,
MSR_ARCH_PERFMON_PERFCTR0 + 24, MSR_ARCH_PERFMON_PERFCTR0 + 25,
MSR_ARCH_PERFMON_PERFCTR0 + 26, MSR_ARCH_PERFMON_PERFCTR0 + 27,
MSR_ARCH_PERFMON_PERFCTR0 + 28, MSR_ARCH_PERFMON_PERFCTR0 + 29,
MSR_ARCH_PERFMON_PERFCTR0 + 30, MSR_ARCH_PERFMON_PERFCTR0 + 31,
MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
@ -1177,13 +1178,6 @@ static u32 msrs_to_save[] = {
MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
MSR_ARCH_PERFMON_EVENTSEL0 + 18, MSR_ARCH_PERFMON_EVENTSEL0 + 19,
MSR_ARCH_PERFMON_EVENTSEL0 + 20, MSR_ARCH_PERFMON_EVENTSEL0 + 21,
MSR_ARCH_PERFMON_EVENTSEL0 + 22, MSR_ARCH_PERFMON_EVENTSEL0 + 23,
MSR_ARCH_PERFMON_EVENTSEL0 + 24, MSR_ARCH_PERFMON_EVENTSEL0 + 25,
MSR_ARCH_PERFMON_EVENTSEL0 + 26, MSR_ARCH_PERFMON_EVENTSEL0 + 27,
MSR_ARCH_PERFMON_EVENTSEL0 + 28, MSR_ARCH_PERFMON_EVENTSEL0 + 29,
MSR_ARCH_PERFMON_EVENTSEL0 + 30, MSR_ARCH_PERFMON_EVENTSEL0 + 31,
};
static unsigned num_msrs_to_save;
@ -5097,13 +5091,14 @@ out:
static void kvm_init_msr_list(void)
{
struct x86_pmu_capability x86_pmu;
u32 dummy[2];
unsigned i, j;
BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
"Please update the fixed PMCs in msrs_to_save[]");
BUILD_BUG_ON_MSG(INTEL_PMC_MAX_GENERIC != 32,
"Please update the generic perfctr/eventsel MSRs in msrs_to_save[]");
perf_get_x86_pmu_capability(&x86_pmu);
for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
@ -5145,6 +5140,15 @@ static void kvm_init_msr_list(void)
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
continue;
break;
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
break;
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
}
default:
break;
@ -8714,10 +8718,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
(sregs->cr4 & X86_CR4_OSXSAVE))
return -EINVAL;
if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
/*
* When EFER.LME and CR0.PG are set, the processor is in
@ -8736,7 +8736,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return -EINVAL;
}
return 0;
return kvm_valid_cr4(vcpu, sregs->cr4);
}
static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
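
The refactor extracts one CR4 validity predicate and calls it from both kvm_set_cr4() and the KVM_SET_SREGS path, closing the hole where userspace could set reserved or CPUID-disallowed CR4 bits via set_sregs. The shape of the change as a standalone sketch, with a fake reserved mask:

#include <stdio.h>

#define CR4_RESERVED_STUB (1UL << 40)   /* hypothetical reserved bit for the demo */

static int valid_cr4(unsigned long cr4)
{
	return (cr4 & CR4_RESERVED_STUB) ? -22 /* -EINVAL */ : 0;
}

static int set_cr4(unsigned long cr4)
{
	if (valid_cr4(cr4))
		return 1;   /* the real code makes the guest take #GP */
	/* ... mode-transition checks, MMU/TLB maintenance ... */
	return 0;
}

static int set_sregs(unsigned long cr4)
{
	/* previously only the OSXSAVE bit was checked here; now the full
	 * predicate runs, so reserved bits are rejected too */
	return valid_cr4(cr4);
}

int main(void)
{
	printf("set_cr4: %d, set_sregs: %d\n",
	       set_cr4(CR4_RESERVED_STUB), set_sregs(CR4_RESERVED_STUB));
	return 0;
}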


@ -1090,6 +1090,7 @@ enum kvm_stat_kind {
struct kvm_stat_data {
int offset;
int mode;
struct kvm *kvm;
};
@ -1097,6 +1098,7 @@ struct kvm_stats_debugfs_item {
const char *name;
int offset;
enum kvm_stat_kind kind;
int mode;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;


@ -22,6 +22,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
@ -48,7 +49,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
$(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \


@ -1083,6 +1083,9 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
#define VMX_BASIC_MEM_TYPE_WB 6LLU
#define VMX_BASIC_INOUT 0x0040000000000000LLU
/* VMX_EPT_VPID_CAP bits */
#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21)
/* MSR_IA32_VMX_MISC bits */
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F


@ -569,6 +569,10 @@ struct vmx_pages {
void *enlightened_vmcs_hva;
uint64_t enlightened_vmcs_gpa;
void *enlightened_vmcs;
void *eptp_hva;
uint64_t eptp_gpa;
void *eptp;
};
struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
@ -576,4 +580,14 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint64_t size,
uint32_t eptp_memslot);
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot, uint32_t eptp_memslot);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
#endif /* SELFTEST_KVM_VMX_H */


@ -705,7 +705,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* on error (e.g. currently no memory region using memslot as a KVM
* memory slot ID).
*/
static struct userspace_mem_region *
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
struct userspace_mem_region *region;


@ -68,4 +68,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);
#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */


@ -1085,7 +1085,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
for (i = 0; i < nmsrs; i++)
state->msrs.entries[i].index = list->indices[i];
r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)",
TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
r, r == nmsrs ? -1 : list->indices[r]);
r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);


@ -7,11 +7,39 @@
#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "vmx.h"
#define PAGE_SHIFT_4K 12
#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
bool enable_evmcs;
struct eptPageTableEntry {
uint64_t readable:1;
uint64_t writable:1;
uint64_t executable:1;
uint64_t memory_type:3;
uint64_t ignore_pat:1;
uint64_t page_size:1;
uint64_t accessed:1;
uint64_t dirty:1;
uint64_t ignored_11_10:2;
uint64_t address:40;
uint64_t ignored_62_52:11;
uint64_t suppress_ve:1;
};
struct eptPageTablePointer {
uint64_t memory_type:3;
uint64_t page_walk_length:3;
uint64_t ad_enabled:1;
uint64_t reserved_11_07:5;
uint64_t address:40;
uint64_t reserved_63_52:12;
};
int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
{
uint16_t evmcs_ver;
@ -174,15 +202,35 @@ bool load_vmcs(struct vmx_pages *vmx)
*/
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
uint32_t sec_exec_ctl = 0;
vmwrite(VIRTUAL_PROCESSOR_ID, 0);
vmwrite(POSTED_INTR_NV, 0);
vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0))
if (vmx->eptp_gpa) {
uint64_t ept_paddr;
struct eptPageTablePointer eptp = {
.memory_type = VMX_BASIC_MEM_TYPE_WB,
.page_walk_length = 3, /* + 1 */
.ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
};
memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
vmwrite(EPT_POINTER, ept_paddr);
sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
}
if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
vmwrite(CPU_BASED_VM_EXEC_CONTROL,
rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
else
else {
vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
GUEST_ASSERT(!sec_exec_ctl);
}
vmwrite(EXCEPTION_BITMAP, 0);
vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
@ -327,3 +375,152 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
init_vmcs_host_state();
init_vmcs_guest_state(guest_rip, guest_rsp);
}
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
{
uint16_t index[4];
struct eptPageTableEntry *pml4e;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT((nested_paddr % vm->page_size) == 0,
"Nested physical address not on page boundary,\n"
" nested_paddr: 0x%lx vm->page_size: 0x%x",
nested_paddr, vm->page_size);
TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
"Nested physical address beyond maximum supported,\n"
" nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
nested_paddr, vm->max_gfn, vm->page_size);
TEST_ASSERT((paddr % vm->page_size) == 0,
"Physical address not on page boundary,\n"
" paddr: 0x%lx vm->page_size: 0x%x",
paddr, vm->page_size);
TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
index[0] = (nested_paddr >> 12) & 0x1ffu;
index[1] = (nested_paddr >> 21) & 0x1ffu;
index[2] = (nested_paddr >> 30) & 0x1ffu;
index[3] = (nested_paddr >> 39) & 0x1ffu;
/* Allocate page directory pointer table if not present. */
pml4e = vmx->eptp_hva;
if (!pml4e[index[3]].readable) {
pml4e[index[3]].address = vm_phy_page_alloc(vm,
KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
>> vm->page_shift;
pml4e[index[3]].writable = true;
pml4e[index[3]].readable = true;
pml4e[index[3]].executable = true;
}
/* Allocate page directory table if not present. */
struct eptPageTableEntry *pdpe;
pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
if (!pdpe[index[2]].readable) {
pdpe[index[2]].address = vm_phy_page_alloc(vm,
KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
>> vm->page_shift;
pdpe[index[2]].writable = true;
pdpe[index[2]].readable = true;
pdpe[index[2]].executable = true;
}
/* Allocate page table if not present. */
struct eptPageTableEntry *pde;
pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
if (!pde[index[1]].readable) {
pde[index[1]].address = vm_phy_page_alloc(vm,
KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
>> vm->page_shift;
pde[index[1]].writable = true;
pde[index[1]].readable = true;
pde[index[1]].executable = true;
}
/* Fill in page table entry. */
struct eptPageTableEntry *pte;
pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
pte[index[0]].address = paddr >> vm->page_shift;
pte[index[0]].writable = true;
pte[index[0]].readable = true;
pte[index[0]].executable = true;
/*
* For now mark these as accessed and dirty because the only
* testcase we have needs that. Can be reconsidered later.
*/
pte[index[0]].accessed = true;
pte[index[0]].dirty = true;
}
/*
* Map a range of EPT guest physical addresses to the VM's physical address
*
* Input Args:
* vm - Virtual Machine
* nested_paddr - Nested guest physical address to map
* paddr - VM Physical Address
* size - The size of the range to map
* eptp_memslot - Memory region slot for new virtual translation tables
*
* Output Args: None
*
* Return: None
*
* Within the VM given by vm, creates a nested guest translation for the
* page range starting at nested_paddr to the page range starting at paddr.
*/
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint64_t size,
uint32_t eptp_memslot)
{
size_t page_size = vm->page_size;
size_t npages = size / page_size;
TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
nested_paddr += page_size;
paddr += page_size;
}
}
/* Prepare an identity extended page table that maps all the
* physical pages in VM.
*/
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot, uint32_t eptp_memslot)
{
sparsebit_idx_t i, last;
struct userspace_mem_region *region =
memslot2region(vm, memslot);
i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
last = i + (region->region.memory_size >> vm->page_shift);
for (;;) {
i = sparsebit_next_clear(region->unused_phy_pages, i);
if (i > last)
break;
nested_map(vmx, vm,
(uint64_t)i << vm->page_shift,
(uint64_t)i << vm->page_shift,
1 << vm->page_shift,
eptp_memslot);
}
}
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}
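
For reference, init_vmcs_control_fields() above packs the EPT pointer by filling the eptPageTablePointer bit-field struct and memcpy'ing it into a 64-bit value. The same packing as a standalone program (the address is arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct eptPageTablePointer {
	uint64_t memory_type:3;
	uint64_t page_walk_length:3;
	uint64_t ad_enabled:1;
	uint64_t reserved_11_07:5;
	uint64_t address:40;
	uint64_t reserved_63_52:12;
};

int main(void)
{
	struct eptPageTablePointer eptp = {
		.memory_type = 6,       /* VMX_BASIC_MEM_TYPE_WB */
		.page_walk_length = 3,  /* 4-level walk, encoded as length - 1 */
		.ad_enabled = 1,
		.address = 0x1234000 >> 12,
	};
	uint64_t val;

	memcpy(&val, &eptp, sizeof(val));
	printf("EPTP = 0x%llx\n", (unsigned long long)val);   /* 0x123405e */
	return 0;
}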


@ -0,0 +1,156 @@
// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty page logging test
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#define VCPU_ID 1
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
#define TEST_MEM_SIZE 3
/* L1 guest test virtual memory offset */
#define GUEST_TEST_MEM 0xc0000000
/* L2 guest test virtual memory offset */
#define NESTED_TEST_MEM1 0xc0001000
#define NESTED_TEST_MEM2 0xc0002000
static void l2_guest_code(void)
{
*(volatile uint64_t *)NESTED_TEST_MEM1;
*(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
GUEST_SYNC(true);
GUEST_SYNC(false);
*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
GUEST_SYNC(true);
*(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
GUEST_SYNC(true);
GUEST_SYNC(false);
/* Exit to L1 and never come back. */
vmcall();
}
void l1_guest_code(struct vmx_pages *vmx)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
GUEST_ASSERT(vmx->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx));
GUEST_ASSERT(load_vmcs(vmx));
prepare_vmcs(vmx, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
GUEST_SYNC(false);
GUEST_ASSERT(!vmlaunch());
GUEST_SYNC(false);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_DONE();
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
struct vmx_pages *vmx;
unsigned long *bmap;
uint64_t *host_test_mem;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
bool done = false;
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
run = vcpu_state(vm, VCPU_ID);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
GUEST_TEST_MEM,
TEST_MEM_SLOT_INDEX,
TEST_MEM_SIZE,
KVM_MEM_LOG_DIRTY_PAGES);
/*
* Add an identity map for GVA range [0xc0000000, 0xc0002000). This
* affects both L1 and L2. However...
*/
virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
TEST_MEM_SIZE * 4096, 0);
/*
* ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
* 0xc0000000.
*
* Note that prepare_eptp should be called only after L1's GPA map is done,
* meaning after the last call to virt_map.
*/
prepare_eptp(vmx, vm, 0);
nested_map_memslot(vmx, vm, 0, 0);
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
bmap = bitmap_alloc(TEST_MEM_SIZE);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) {
memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
_vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
__FILE__, uc.args[1]);
/* NOT REACHED */
case UCALL_SYNC:
/*
* The nested guest wrote at offset 0x1000 in the memslot, but the
* dirty bitmap must be filled in according to L1 GPA, not L2.
*/
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
if (uc.args[1]) {
TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
} else {
TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
}
TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
break;
case UCALL_DONE:
done = true;
break;
default:
TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
}


@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending,
#endif /* _TRACE_VGIC_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic
#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace


@ -617,8 +617,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
stat_data->kvm = kvm;
stat_data->offset = p->offset;
stat_data->mode = p->mode ? p->mode : 0644;
kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
debugfs_create_file(p->name, 0644, kvm->debugfs_dentry,
debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
stat_data, stat_fops_per_vm[p->kind]);
}
return 0;
@ -3929,7 +3930,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
return -ENOENT;
if (simple_attr_open(inode, file, get, set, fmt)) {
if (simple_attr_open(inode, file, get,
stat_data->mode & S_IWUGO ? set : NULL,
fmt)) {
kvm_put_kvm(stat_data->kvm);
return -ENOMEM;
}
@ -4177,7 +4180,8 @@ static void kvm_init_debug(void)
kvm_debugfs_num_entries = 0;
for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
int mode = p->mode ? p->mode : 0644;
debugfs_create_file(p->name, mode, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]);
}