
Bugfixes (arm and x86) and cleanups.

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQEcBAABAgAGBQJdTfRfAAoJEL/70l94x66DcN0IAIwyaU2+kwP0jd2miQuKxgwl
 WU4u7dZCoQC6meWEVmrSJIVMBONRubmZ9iCqT7807YP8YZSQpOth51FMbULUWuy1
 VW1eaRwqidX0EAihDhg2ZbBZ8H6RQ9Fn0aiEEh44dAZZAwGSVnO3PRKvQEJ15xjk
 q+OQ4hrxtoorwLj+myejmq3YenTFTCMMJfYwwvlCl+J1FfrLZi5k3X5Gjk+j8Ixd
 8CL8/6u5Lu6MCgfYVvxvo8/bUPiATBdF1sWJMMALwXTrDiSy4tQRD0NvZP1HM8G1
 hy0XnhgtsS9rWNLtAFOj+r/XhP9V5lOOGX8yBcj0XQQr+DC9MG6MCL+pXXOaMcA=
 =ZZh8
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Bugfixes (arm and x86) and cleanups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  selftests: kvm: Adding config fragments
  KVM: selftests: Update gitignore file for latest changes
  kvm: remove unnecessary PageReserved check
  KVM: arm/arm64: vgic: Reevaluate level sensitive interrupts on enable
  KVM: arm: Don't write junk to CP15 registers on reset
  KVM: arm64: Don't write junk to sysregs on reset
  KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block
  x86: kvm: remove useless calls to kvm_para_available
  KVM: no need to check return value of debugfs_create functions
  KVM: remove kvm_arch_has_vcpu_debugfs()
  KVM: Fix leak vCPU's VMCS value into other pCPU
  KVM: Check preempted_in_kernel for involuntary preemption
  KVM: LAPIC: Don't need to wakeup vCPU twice after timer fire
  arm64: KVM: hyp: debug-sr: Mark expected switch fall-through
  KVM: arm64: Update kvm_arm_exception_class and esr_class_str for new EC
  KVM: arm: vgic-v3: Mark expected switch fall-through
  arm64: KVM: regmap: Fix unexpected switch fall-through
  KVM: arm/arm64: Introduce kvm_pmu_vcpu_init() to setup PMU counter index
Linus Torvalds 2019-08-09 15:46:29 -07:00
commit 7f20fd2337
30 changed files with 249 additions and 142 deletions


@@ -651,13 +651,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
-			      const struct coproc_reg *table, size_t num)
+			      const struct coproc_reg *table, size_t num,
+			      unsigned long *bmap)
 {
 	unsigned long i;
 
 	for (i = 0; i < num; i++)
-		if (table[i].reset)
+		if (table[i].reset) {
+			int reg = table[i].reg;
+
 			table[i].reset(vcpu, &table[i]);
+			if (reg > 0 && reg < NR_CP15_REGS) {
+				set_bit(reg, bmap);
+				if (table[i].is_64bit)
+					set_bit(reg + 1, bmap);
+			}
+		}
 }
 
 static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
@@ -1432,17 +1441,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 {
 	size_t num;
 	const struct coproc_reg *table;
-
-	/* Catch someone adding a register without putting in reset entry. */
-	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
+	DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
 
 	/* Generic chip reset first (so target could override). */
-	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
 
 	table = get_target_table(vcpu->arch.target, &num);
-	reset_coproc_regs(vcpu, table, num);
+	reset_coproc_regs(vcpu, table, num, bmap);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+		WARN(!test_bit(num, bmap),
 		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
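
Aside: the two hunks above swap a 0x42 canary pattern for a bitmap that records which registers actually ran a reset handler. A minimal standalone sketch of that bitmap pattern, using userspace stand-ins for the kernel's DECLARE_BITMAP()/set_bit()/test_bit() (illustrative only, not the kernel code):

/* reset_bitmap.c — build: gcc reset_bitmap.c && ./a.out */
#include <stdio.h>

#define NR_REGS 16
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void set_bit_(unsigned int n, unsigned long *bmap)
{
	bmap[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static int test_bit_(unsigned int n, const unsigned long *bmap)
{
	return !!(bmap[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG)));
}

int main(void)
{
	unsigned long bmap[(NR_REGS + BITS_PER_LONG - 1) / BITS_PER_LONG] = { 0, };
	unsigned int reg;

	/* Pretend the reset handlers covered registers 1..12 only. */
	for (reg = 1; reg <= 12; reg++)
		set_bit_(reg, bmap);

	/* Same check as kvm_reset_coprocs(): flag anything left unset. */
	for (reg = 1; reg < NR_REGS; reg++)
		if (!test_bit_(reg, bmap))
			printf("Didn't reset reg %u\n", reg);
	return 0;
}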


@@ -316,9 +316,10 @@
 #define kvm_arm_exception_class \
 	ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
-	ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(CP14_64), ECN(SVC64), \
-	ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(IMP_DEF), ECN(IABT_LOW), \
-	ECN(IABT_CUR), ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
+	ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
+	ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
+	ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
+	ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
 	ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
 	ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
 	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \


@@ -733,6 +733,7 @@ static const char *esr_class_str[] = {
 	[ESR_ELx_EC_CP14_LS]	= "CP14 LDC/STC",
 	[ESR_ELx_EC_FP_ASIMD]	= "ASIMD",
 	[ESR_ELx_EC_CP10_ID]	= "CP10 MRC/VMRS",
+	[ESR_ELx_EC_PAC]	= "PAC",
 	[ESR_ELx_EC_CP14_64]	= "CP14 MCRR/MRRC",
 	[ESR_ELx_EC_ILL]	= "PSTATE.IL",
 	[ESR_ELx_EC_SVC32]	= "SVC (AArch32)",


@@ -18,40 +18,70 @@
 #define save_debug(ptr,reg,nr) \
 	switch (nr) { \
 	case 15:	ptr[15] = read_debug(reg, 15); \
+			/* Fall through */ \
 	case 14:	ptr[14] = read_debug(reg, 14); \
+			/* Fall through */ \
 	case 13:	ptr[13] = read_debug(reg, 13); \
+			/* Fall through */ \
 	case 12:	ptr[12] = read_debug(reg, 12); \
+			/* Fall through */ \
 	case 11:	ptr[11] = read_debug(reg, 11); \
+			/* Fall through */ \
 	case 10:	ptr[10] = read_debug(reg, 10); \
+			/* Fall through */ \
 	case 9:		ptr[9] = read_debug(reg, 9); \
+			/* Fall through */ \
 	case 8:		ptr[8] = read_debug(reg, 8); \
+			/* Fall through */ \
 	case 7:		ptr[7] = read_debug(reg, 7); \
+			/* Fall through */ \
 	case 6:		ptr[6] = read_debug(reg, 6); \
+			/* Fall through */ \
 	case 5:		ptr[5] = read_debug(reg, 5); \
+			/* Fall through */ \
 	case 4:		ptr[4] = read_debug(reg, 4); \
+			/* Fall through */ \
 	case 3:		ptr[3] = read_debug(reg, 3); \
+			/* Fall through */ \
 	case 2:		ptr[2] = read_debug(reg, 2); \
+			/* Fall through */ \
 	case 1:		ptr[1] = read_debug(reg, 1); \
+			/* Fall through */ \
 	default:	ptr[0] = read_debug(reg, 0); \
 	}
 
 #define restore_debug(ptr,reg,nr) \
 	switch (nr) { \
 	case 15:	write_debug(ptr[15], reg, 15); \
+			/* Fall through */ \
 	case 14:	write_debug(ptr[14], reg, 14); \
+			/* Fall through */ \
 	case 13:	write_debug(ptr[13], reg, 13); \
+			/* Fall through */ \
 	case 12:	write_debug(ptr[12], reg, 12); \
+			/* Fall through */ \
 	case 11:	write_debug(ptr[11], reg, 11); \
+			/* Fall through */ \
 	case 10:	write_debug(ptr[10], reg, 10); \
+			/* Fall through */ \
 	case 9:		write_debug(ptr[9], reg, 9); \
+			/* Fall through */ \
 	case 8:		write_debug(ptr[8], reg, 8); \
+			/* Fall through */ \
 	case 7:		write_debug(ptr[7], reg, 7); \
+			/* Fall through */ \
 	case 6:		write_debug(ptr[6], reg, 6); \
+			/* Fall through */ \
 	case 5:		write_debug(ptr[5], reg, 5); \
+			/* Fall through */ \
 	case 4:		write_debug(ptr[4], reg, 4); \
+			/* Fall through */ \
 	case 3:		write_debug(ptr[3], reg, 3); \
+			/* Fall through */ \
 	case 2:		write_debug(ptr[2], reg, 2); \
+			/* Fall through */ \
 	case 1:		write_debug(ptr[1], reg, 1); \
+			/* Fall through */ \
 	default:	write_debug(ptr[0], reg, 0); \
 	}
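
These comments are what GCC's -Wimplicit-fallthrough heuristic looks for: an intentional fall-through annotated with a recognized comment compiles quietly, an unannotated one warns. A tiny standalone illustration (hypothetical example, not kernel code):

/* fallthrough.c — build: gcc -Wextra -c fallthrough.c
 * -Wextra enables -Wimplicit-fallthrough=3 on GCC 7+; delete one of the
 * comments below and the compiler flags that case label.
 */
void save_regs(unsigned long *ptr, int nr)
{
	switch (nr) {
	case 2:
		ptr[2] = 2;
		/* Fall through */
	case 1:
		ptr[1] = 1;
		/* Fall through */
	default:
		ptr[0] = 0;
	}
}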


@@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 	switch (spsr_idx) {
 	case KVM_SPSR_SVC:
 		write_sysreg_el1(v, SYS_SPSR);
+		break;
 	case KVM_SPSR_ABT:
 		write_sysreg(v, spsr_abt);
+		break;
 	case KVM_SPSR_UND:
 		write_sysreg(v, spsr_und);
+		break;
 	case KVM_SPSR_IRQ:
 		write_sysreg(v, spsr_irq);
+		break;
 	case KVM_SPSR_FIQ:
 		write_sysreg(v, spsr_fiq);
+		break;
 	}
 }
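
For contrast with the annotations above, this fix is the opposite case: the breaks were genuinely missing, so a write to one banked SPSR also clobbered every one below it. A standalone sketch of that failure mode (hypothetical, heavily simplified):

/* spsr_break.c — build: gcc spsr_break.c && ./a.out */
#include <stdio.h>

enum { SPSR_SVC, SPSR_ABT, SPSR_UND, SPSR_IRQ, SPSR_FIQ, NR_SPSR };
static unsigned long spsr[NR_SPSR];

static void write_spsr(int idx, unsigned long v)
{
	switch (idx) {
	case SPSR_SVC:
		spsr[SPSR_SVC] = v;
		break;	/* without this break, execution falls into ABT... */
	case SPSR_ABT:
		spsr[SPSR_ABT] = v;
		break;	/* ...and so on down to FIQ, clobbering them all */
	case SPSR_UND:
		spsr[SPSR_UND] = v;
		break;
	case SPSR_IRQ:
		spsr[SPSR_IRQ] = v;
		break;
	case SPSR_FIQ:
		spsr[SPSR_FIQ] = v;
		break;
	}
}

int main(void)
{
	write_spsr(SPSR_SVC, 0x1d3);
	printf("svc=%#lx abt=%#lx (abt stays 0 only with the breaks)\n",
	       spsr[SPSR_SVC], spsr[SPSR_ABT]);
	return 0;
}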


@@ -632,7 +632,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -981,13 +981,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
-	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
+	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
-	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
+	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
-	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
+	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
-	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
+	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
 
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n) \
@@ -1540,7 +1540,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
 
 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
 
-	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
 	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
 	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2254,13 +2254,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 }
 
 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
-				const struct sys_reg_desc *table, size_t num)
+				const struct sys_reg_desc *table, size_t num,
+				unsigned long *bmap)
 {
 	unsigned long i;
 
 	for (i = 0; i < num; i++)
-		if (table[i].reset)
+		if (table[i].reset) {
+			int reg = table[i].reg;
+
 			table[i].reset(vcpu, &table[i]);
+			if (reg > 0 && reg < NR_SYS_REGS)
+				set_bit(reg, bmap);
+		}
 }
 
 /**
@@ -2774,18 +2780,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 {
 	size_t num;
 	const struct sys_reg_desc *table;
-
-	/* Catch someone adding a register without putting in reset entry. */
-	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
 
 	/* Generic chip reset first (so target could override). */
-	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
 
 	table = get_target_table(vcpu->arch.target, true, &num);
-	reset_sys_reg_descs(vcpu, table, num);
+	reset_sys_reg_descs(vcpu, table, num, bmap);
 
 	for (num = 1; num < NR_SYS_REGS; num++) {
-		if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+		if (WARN(!test_bit(num, bmap),
 			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
 			break;
 	}


@@ -150,16 +150,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	return 0;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_mips_free_vcpus(struct kvm *kvm)
 {
 	unsigned int i;


@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
 	return false;
@@ -452,16 +457,6 @@ err_out:
 	return -EINVAL;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	unsigned int i;
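
kvm_arch_dy_runnable() picks up a generic __weak default in virt/kvm/kvm_main.c (last file below); an architecture such as powerpc overrides it simply by providing a strong definition, as in the hunk above. A runnable sketch of the weak-symbol pattern itself (hypothetical names; GCC/Clang attribute):

/* weak.c — build: gcc weak.c && ./a.out */
#include <stdio.h>
#include <stdbool.h>

/* Generic default; any strong definition linked in wins over it. */
__attribute__((weak)) bool arch_dy_runnable(void)
{
	return false;
}

/* Uncommenting this strong definition (or linking one from another
 * object file) overrides the weak one, as powerpc does above:
 *
 * bool arch_dy_runnable(void) { return true; }
 */

int main(void)
{
	printf("dy_runnable: %d\n", arch_dy_runnable());
	return 0;
}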


@@ -2516,16 +2516,6 @@ out_err:
 	return rc;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");


@@ -35,6 +35,8 @@
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/hyperv-tlfs.h>
 
+#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
+
 #define KVM_MAX_VCPUS 288
 #define KVM_SOFT_MAX_VCPUS 240
 #define KVM_MAX_VCPU_ID 1023
@@ -1175,6 +1177,7 @@ struct kvm_x86_ops {
 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
 
 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
 			    bool *expired);


@@ -308,9 +308,6 @@ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 
 static void kvm_guest_cpu_init(void)
 {
-	if (!kvm_para_available())
-		return;
-
 	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
 		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
@@ -625,9 +622,6 @@ static void __init kvm_guest_init(void)
 {
 	int i;
 
-	if (!kvm_para_available())
-		return;
-
 	paravirt_ops_setup();
 	register_reboot_notifier(&kvm_pv_reboot_nb);
 	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
@@ -848,8 +842,6 @@ asm(
  */
 void __init kvm_spinlock_init(void)
 {
-	if (!kvm_para_available())
-		return;
-
 	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;


@@ -8,11 +8,6 @@
 #include <linux/debugfs.h>
 #include "lapic.h"
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return true;
-}
-
 static int vcpu_get_timer_advance_ns(void *data, u64 *val)
 {
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -48,37 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");
 
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 {
-	struct dentry *ret;
-
-	ret = debugfs_create_file("tsc-offset", 0444,
-				  vcpu->debugfs_dentry,
-				  vcpu, &vcpu_tsc_offset_fops);
-	if (!ret)
-		return -ENOMEM;
+	debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu,
+			    &vcpu_tsc_offset_fops);
 
-	if (lapic_in_kernel(vcpu)) {
-		ret = debugfs_create_file("lapic_timer_advance_ns", 0444,
-					  vcpu->debugfs_dentry,
-					  vcpu, &vcpu_timer_advance_ns_fops);
-		if (!ret)
-			return -ENOMEM;
-	}
+	if (lapic_in_kernel(vcpu))
+		debugfs_create_file("lapic_timer_advance_ns", 0444,
+				    vcpu->debugfs_dentry, vcpu,
+				    &vcpu_timer_advance_ns_fops);
 
 	if (kvm_has_tsc_control) {
-		ret = debugfs_create_file("tsc-scaling-ratio", 0444,
-					  vcpu->debugfs_dentry,
-					  vcpu, &vcpu_tsc_scaling_fops);
-		if (!ret)
-			return -ENOMEM;
-		ret = debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
-					  vcpu->debugfs_dentry,
-					  vcpu, &vcpu_tsc_scaling_frac_fops);
-		if (!ret)
-			return -ENOMEM;
+		debugfs_create_file("tsc-scaling-ratio", 0444,
+				    vcpu->debugfs_dentry, vcpu,
+				    &vcpu_tsc_scaling_fops);
+		debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
+				    vcpu->debugfs_dentry, vcpu,
+				    &vcpu_tsc_scaling_frac_fops);
 	}
-
-	return 0;
 }


@@ -1548,7 +1548,6 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
 static void apic_timer_expired(struct kvm_lapic *apic)
 {
 	struct kvm_vcpu *vcpu = apic->vcpu;
-	struct swait_queue_head *q = &vcpu->wq;
 	struct kvm_timer *ktimer = &apic->lapic_timer;
 
 	if (atomic_read(&apic->lapic_timer.pending))
@@ -1566,13 +1565,6 @@ static void apic_timer_expired(struct kvm_lapic *apic)
 
 	atomic_inc(&apic->lapic_timer.pending);
 	kvm_set_pending_timer(vcpu);
-
-	/*
-	 * For x86, the atomic_inc() is serialized, thus
-	 * using swait_active() is safe.
-	 */
-	if (swait_active(q))
-		swake_up_one(q);
 }
 
 static void start_sw_tscdeadline(struct kvm_lapic *apic)
static void start_sw_tscdeadline(struct kvm_lapic *apic)


@@ -5190,6 +5190,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 		kvm_vcpu_wake_up(vcpu);
 }
 
+static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
 {
 	unsigned long flags;
@@ -7314,6 +7319,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.pmu_ops = &amd_pmu_ops,
 
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
+	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,


@@ -6117,6 +6117,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	return max_irr;
 }
 
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
@@ -7726,6 +7731,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
 	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+	.dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,


@@ -9698,6 +9698,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+		return true;
+
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+	    kvm_test_request(KVM_REQ_SMI, vcpu) ||
+	    kvm_test_request(KVM_REQ_EVENT, vcpu))
+		return true;
+
+	if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.preempted_in_kernel;


@@ -34,6 +34,7 @@ struct kvm_pmu {
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
@@ -71,6 +72,7 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
+static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}


@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)


@@ -861,8 +861,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-bool kvm_arch_has_vcpu_debugfs(void);
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#endif
 
 int kvm_arch_hardware_enable(void);
 void kvm_arch_hardware_disable(void);
@@ -872,6 +873,7 @@ int kvm_arch_check_processor_compat(void);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*


@@ -1,7 +1,7 @@
 /s390x/sync_regs_test
 /x86_64/cr4_cpuid_sync_test
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
-/x86_64/kvm_create_max_vcpus
 /x86_64/mmio_warning_test
 /x86_64/platform_info_test
 /x86_64/set_sregs_test
@@ -13,3 +13,4 @@
 /x86_64/vmx_tsc_adjust_test
 /clear_dirty_log_test
 /dirty_log_test
+/kvm_create_max_vcpus


@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y


@@ -144,11 +144,6 @@ out_fail_alloc:
 	return ret;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
@@ -323,6 +318,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * If we're about to block (most likely because we've just hit a
+	 * WFI), we need to sync back the state of the GIC CPU interface
+	 * so that we have the latest PMR and group enables. This ensures
+	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
+	 * whether we have pending interrupts.
+	 */
+	preempt_disable();
+	kvm_vgic_vmcr_sync(vcpu);
+	preempt_enable();
+
 	kvm_vgic_v4_enable_doorbell(vcpu);
 }
@@ -340,6 +346,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
 
+	kvm_pmu_vcpu_init(vcpu);
+
 	kvm_arm_reset_debug_ptr(vcpu);
 
 	return kvm_vgic_vcpu_init(vcpu);


@@ -349,8 +349,10 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
 	case 7:
 		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
 		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
+		/* Fall through */
 	case 6:
 		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
+		/* Fall through */
 	default:
 		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
 	}
@@ -359,8 +361,10 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
 	case 7:
 		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
 		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
+		/* Fall through */
 	case 6:
 		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
+		/* Fall through */
 	default:
 		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
 	}
@@ -382,8 +386,10 @@ void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
 	case 7:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
+		/* Fall through */
 	case 6:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
+		/* Fall through */
 	default:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
 	}
@@ -392,8 +398,10 @@ void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
 	case 7:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
+		/* Fall through */
 	case 6:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
+		/* Fall through */
 	default:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
 	}


@@ -214,6 +214,20 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 	kvm_pmu_release_perf_event(pmc);
 }
 
+/**
+ * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
+		pmu->pmc[i].idx = i;
+}
+
 /**
  * kvm_pmu_vcpu_reset - reset pmu state for cpu
  * @vcpu: The vcpu pointer
@@ -224,10 +238,8 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 	int i;
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 
-	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
-		pmu->pmc[i].idx = i;
-	}
 
 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
 }
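
The point of the split above: the counter index is invariant for the vcpu's lifetime, so it is now assigned once at init rather than on every reset, where the old loop stopped a counter before its idx had ever been set on the first pass. A standalone sketch of the init/reset split (hypothetical simplified types, not the kernel structures):

/* init_vs_reset.c — build: gcc init_vs_reset.c && ./a.out */
#include <stdio.h>

#define MAX_COUNTERS 32

struct pmc { int idx; unsigned long value; };
struct pmu { struct pmc pmc[MAX_COUNTERS]; };

static void pmu_vcpu_init(struct pmu *pmu)
{
	for (int i = 0; i < MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;	/* invariant: set once, never again */
}

static void pmu_vcpu_reset(struct pmu *pmu)
{
	for (int i = 0; i < MAX_COUNTERS; i++)
		pmu->pmc[i].value = 0;	/* per-reset state only */
}

int main(void)
{
	struct pmu pmu;

	pmu_vcpu_init(&pmu);
	pmu_vcpu_reset(&pmu);
	printf("pmc[3].idx=%d value=%lu\n", pmu.pmc[3].idx, pmu.pmc[3].value);
	return 0;
}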


@@ -113,6 +113,22 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
+		if (vgic_irq_is_mapped_level(irq)) {
+			bool was_high = irq->line_level;
+
+			/*
+			 * We need to update the state of the interrupt because
+			 * the guest might have changed the state of the device
+			 * while the interrupt was disabled at the VGIC level.
+			 */
+			irq->line_level = vgic_get_phys_line_level(irq);
+
+			/*
+			 * Deactivate the physical interrupt so the GIC will let
+			 * us know when it is asserted again.
+			 */
+			if (!irq->active && was_high && !irq->line_level)
+				vgic_irq_set_phys_active(irq, false);
+		}
 		irq->enabled = true;
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);


@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
 		       kvm_vgic_global_state.vctrl_base + GICH_APR);
 }
 
-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 
 	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+	vgic_v2_vmcr_sync(vcpu);
 	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
 }


@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 		__vgic_v3_activate_traps(vcpu);
 }
 
-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
 	if (likely(cpu_if->vgic_sre))
 		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+	vgic_v3_vmcr_sync(vcpu);
 
 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);


@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
 		vgic_v3_put(vcpu);
 }
 
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		return;
+
+	if (kvm_vgic_global_state.type == VGIC_V2)
+		vgic_v2_vmcr_sync(vcpu);
+	else
+		vgic_v3_vmcr_sync(vcpu);
+}
+
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;


@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
 void vgic_v2_init_lrs(void);
 void vgic_v2_load(struct kvm_vcpu *vcpu);
 void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
 
 void vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
 void vgic_v3_load(struct kvm_vcpu *vcpu);
 void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
 
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);


@@ -1855,8 +1855,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 
-		if (!PageReserved(page))
-			SetPageDirty(page);
+		SetPageDirty(page);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
@@ -2477,6 +2476,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair.  However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+	if (kvm_arch_dy_runnable(vcpu))
+		return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+#endif
+
+	return false;
+}
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
@@ -2506,9 +2528,10 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 				continue;
 			if (vcpu == me)
 				continue;
-			if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+			if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
 				continue;
-			if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
+			    !kvm_arch_vcpu_in_kernel(vcpu))
 				continue;
 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 				continue;
@@ -2591,30 +2614,20 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
 }
 
-static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
 {
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
 	char dir_name[ITOA_MAX_LEN * 2];
-	int ret;
-
-	if (!kvm_arch_has_vcpu_debugfs())
-		return 0;
 
 	if (!debugfs_initialized())
-		return 0;
+		return;
 
 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
 	vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
-						vcpu->kvm->debugfs_dentry);
-	if (!vcpu->debugfs_dentry)
-		return -ENOMEM;
+						  vcpu->kvm->debugfs_dentry);
 
-	ret = kvm_arch_create_vcpu_debugfs(vcpu);
-	if (ret < 0) {
-		debugfs_remove_recursive(vcpu->debugfs_dentry);
-		return ret;
-	}
-
-	return 0;
+	kvm_arch_create_vcpu_debugfs(vcpu);
+#endif
 }
@@ -2649,9 +2662,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	if (r)
 		goto vcpu_destroy;
 
-	r = kvm_create_vcpu_debugfs(vcpu);
-	if (r)
-		goto vcpu_destroy;
+	kvm_create_vcpu_debugfs(vcpu);
 
 	mutex_lock(&kvm->lock);
 	if (kvm_get_vcpu_by_id(kvm, id)) {
@@ -4205,7 +4216,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-	vcpu->preempted = false;
+	WRITE_ONCE(vcpu->preempted, false);
 	WRITE_ONCE(vcpu->ready, false);
 
 	kvm_arch_sched_in(vcpu, cpu);
@@ -4219,7 +4230,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
 	if (current->state == TASK_RUNNING) {
-		vcpu->preempted = true;
+		WRITE_ONCE(vcpu->preempted, true);
 		WRITE_ONCE(vcpu->ready, true);
 	}
 	kvm_arch_vcpu_put(vcpu);
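
vcpu->preempted flips in the scheduler notifiers above while kvm_vcpu_on_spin() reads it concurrently from other CPUs with no lock held, hence the switch to READ_ONCE()/WRITE_ONCE(). A userspace sketch of what those macros boil down to for word-sized fields (stand-in definitions; the kernel versions also handle other access sizes and add sanity checks):

/* once.c — build: gcc once.c -lpthread && ./a.out */
#include <pthread.h>
#include <stdio.h>

#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static int preempted;

static void *sched_out(void *arg)
{
	(void)arg;
	WRITE_ONCE(preempted, 1);	/* single, non-torn store, as in kvm_sched_out() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sched_out, NULL);
	pthread_join(t, NULL);
	/* single, non-torn load, as in kvm_vcpu_on_spin() */
	printf("preempted=%d\n", READ_ONCE(preempted));
	return 0;
}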