
x86/kvm/kvmclock: Convert to hotplug state machine

Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs.

We assumed that the priority ordering was meant to invoke the online
callback as the last step. In the original code the down prepare
callback was likewise invoked as the last step. With the symmetric state
machine the down prepare callback is now the first step.
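
For readers unfamiliar with the hotplug state machine API, below is a minimal
sketch of the registration pattern this patch converts to. The example_* names,
the "example:online" string and the use of a dynamically allocated
CPUHP_AP_ONLINE_DYN slot are illustrative assumptions only; the patch itself
installs a dedicated CPUHP_AP_X86_KVM_CLK_ONLINE state from kvm_timer_init()
and removes it in kvm_arch_exit().

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical callbacks; the patch uses kvmclock_cpu_online() and
 * kvmclock_cpu_down_prep() in arch/x86/kvm/x86.c. */
static int example_cpu_online(unsigned int cpu)
{
	/* Invoked on each CPU as it comes online, and by cpuhp_setup_state()
	 * on all CPUs that are already online at registration time. */
	return 0;
}

static int example_cpu_down_prep(unsigned int cpu)
{
	/* Invoked while a CPU goes down; teardown callbacks run in the
	 * reverse order of the bring-up (online) steps. */
	return 0;
}

static enum cpuhp_state example_state;

static int __init example_init(void)
{
	int ret;

	/* A dynamically allocated AP state is used here for illustration;
	 * the patch adds a dedicated CPUHP_AP_X86_KVM_CLK_ONLINE entry. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_down_prep);
	if (ret < 0)
		return ret;
	example_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	/* Remove the state without invoking the teardown callback on the
	 * CPUs that are still online. */
	cpuhp_remove_state_nocalls(example_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because the core invokes the online callback on all already online CPUs at
registration time, the explicit for_each_online_cpu() loop in kvm_timer_init()
becomes unnecessary, as the diff below shows.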

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153335.542880859@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Sebastian Andrzej Siewior 2016-07-13 17:16:33 +00:00 committed by Ingo Molnar
parent 162e52a117
commit 251a5fd64b
2 changed files with 9 additions and 25 deletions

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5552,9 +5552,10 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
-static void tsc_bad(void *info)
+static int kvmclock_cpu_down_prep(unsigned int cpu)
 {
 	__this_cpu_write(cpu_tsc_khz, 0);
+	return 0;
 }
 
 static void tsc_khz_changed(void *data)
@@ -5659,33 +5660,18 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
 	.notifier_call  = kvmclock_cpufreq_notifier
 };
 
-static int kvmclock_cpu_notifier(struct notifier_block *nfb,
-				 unsigned long action, void *hcpu)
+static int kvmclock_cpu_online(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		tsc_khz_changed(NULL);
-		break;
-	case CPU_DOWN_PREPARE:
-		tsc_bad(NULL);
-		break;
-	}
-	return NOTIFY_OK;
+	tsc_khz_changed(NULL);
+	return 0;
 }
 
-static struct notifier_block kvmclock_cpu_notifier_block = {
-	.notifier_call  = kvmclock_cpu_notifier,
-	.priority = -INT_MAX
-};
-
 static void kvm_timer_init(void)
 {
 	int cpu;
 
 	max_tsc_khz = tsc_khz;
 
-	cpu_notifier_register_begin();
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
 		struct cpufreq_policy policy;
@@ -5700,12 +5686,9 @@ static void kvm_timer_init(void)
 					  CPUFREQ_TRANSITION_NOTIFIER);
 	}
 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
 
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
-
-	__register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
-	cpu_notifier_register_done();
+	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
@@ -5894,7 +5877,7 @@ void kvm_arch_exit(void)
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
 					    CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
 #ifdef CONFIG_X86_64
 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
 #endif

--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -56,6 +56,7 @@ enum cpuhp_state {
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END	= CPUHP_AP_ONLINE_DYN + 30,
 	CPUHP_AP_X86_HPET_ONLINE,
+	CPUHP_AP_X86_KVM_CLK_ONLINE,
 	CPUHP_AP_ACTIVE,
 	CPUHP_ONLINE,
 };