
x86/hyperv: Enable PV qspinlock for Hyper-V

Implement the required wait and kick callbacks to support PV spinlocks in
Hyper-V guests.

[ tglx: Document the requirement for disabling interrupts in the wait()
        callback. Remove goto and unnecessary includes. Add prototype
        for hv_vcpu_is_preempted(). Adapted to pending paravirt changes. ]

Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Michael Kelley (EOSG) <Michael.H.Kelley@microsoft.com>
Cc: chao.p.peng@intel.com
Cc: chao.gao@intel.com
Cc: isaku.yamahata@intel.com
Cc: tianyu.lan@microsoft.com
Link: https://lkml.kernel.org/r/1538987374-51217-3-git-send-email-yi.y.sun@linux.intel.com
Yi Sun 2018-10-08 16:29:34 +08:00 committed by Thomas Gleixner
parent f726c4620d
commit 3a025de64b
5 changed files with 113 additions and 0 deletions

Documentation/admin-guide/kernel-parameters.txt

@@ -1385,6 +1385,11 @@
	hvc_iucv_allow=	[S390]	Comma-separated list of z/VM user IDs.
			If specified, z/VM IUCV HVC accepts connections
			from listed z/VM user IDs only.

	hv_nopvspin	[X86,HYPER_V] Disables the paravirt spinlock optimizations
			which allow the hypervisor to 'idle' the
			guest on lock contention.

	keep_bootcon	[KNL]
			Do not unregister boot console at start. This is only
			useful for debugging when something happens in the window
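
Opting out of the optimization only requires the new flag on the guest's kernel command line. The fragment below is illustrative (a typical /etc/default/grub entry), not part of this patch:

	GRUB_CMDLINE_LINUX="hv_nopvspin"

With the flag present, hv_parse_nopvspin() in hv_spinlock.c below clears hv_pvspin, so hv_init_spinlocks() leaves the native qspinlock ops untouched.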

arch/x86/hyperv/Makefile

@@ -1,2 +1,6 @@
obj-y			:= hv_init.o mmu.o nested.o
obj-$(CONFIG_X86_64)	+= hv_apic.o

ifdef CONFIG_X86_64
obj-$(CONFIG_PARAVIRT_SPINLOCKS)	+= hv_spinlock.o
endif

arch/x86/hyperv/hv_spinlock.c

@@ -0,0 +1,88 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>

static bool __initdata hv_pvspin = true;

static void hv_qlock_kick(int cpu)
{
	apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}

static void hv_qlock_wait(u8 *byte, u8 val)
{
	unsigned long msr_val;
	unsigned long flags;

	if (in_nmi())
		return;

	/*
	 * Reading HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
	 * vCPU can be put into 'idle' state. This 'idle' state is
	 * terminated by an IPI, usually from hv_qlock_kick(), even if
	 * interrupts are disabled on the vCPU.
	 *
	 * To prevent a race against the unlock path it is required to
	 * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
	 * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
	 * the lock value check and the rdmsrl() then the vCPU might be put
	 * into 'idle' state by the hypervisor and kept in that state for
	 * an unspecified amount of time.
	 */
	local_irq_save(flags);

	/*
	 * Only issue the rdmsrl() when the lock state has not changed.
	 */
	if (READ_ONCE(*byte) == val)
		rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);

	local_irq_restore(flags);
}

/*
 * Hyper-V does not support this so far.
 */
bool hv_vcpu_is_preempted(int vcpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);

void __init hv_init_spinlocks(void)
{
	if (!hv_pvspin || !apic ||
	    !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
	    !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
		pr_info("PV spinlocks disabled\n");
		return;
	}
	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = hv_qlock_wait;
	pv_ops.lock.kick = hv_qlock_kick;
	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}

static __init int hv_parse_nopvspin(char *arg)
{
	hv_pvspin = false;
	return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);

arch/x86/include/asm/mshyperv.h

@@ -351,6 +351,8 @@ int hyperv_flush_guest_mapping(u64 as);
#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

arch/x86/kernel/cpu/mshyperv.c

@@ -199,6 +199,16 @@ static unsigned long hv_get_tsc_khz(void)
	return freq / 1000;
}

#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
static void __init hv_smp_prepare_boot_cpu(void)
{
	native_smp_prepare_boot_cpu();
#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
	hv_init_spinlocks();
#endif
}
#endif

static void __init ms_hyperv_init_platform(void)
{
	int hv_host_info_eax;
@@ -303,6 +313,10 @@ static void __init ms_hyperv_init_platform(void)
	if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
		alloc_intr_gate(HYPERV_STIMER0_VECTOR,
				hv_stimer0_callback_vector);

# ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
# endif
#endif
}
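
The hunk above follows the kernel's usual wrap-and-extend pattern for platform hooks: the Hyper-V callback first runs the native implementation, then layers its own setup on top, and platform detection swaps the hook into the ops table before it is invoked. A toy user-space model of the pattern (an illustration only; the names mirror the patch but this is not kernel API):

#include <stdio.h>

struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);	/* boot-time hook */
};

static void native_smp_prepare_boot_cpu(void)
{
	puts("native: prepare boot CPU");
}

/* Wrap the native hook, then add platform-specific setup on top,
 * mirroring hv_smp_prepare_boot_cpu() in the hunk above. */
static void hv_smp_prepare_boot_cpu(void)
{
	native_smp_prepare_boot_cpu();
	puts("hv: hv_init_spinlocks()");
}

static struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
};

int main(void)
{
	/* Platform detection (ms_hyperv_init_platform() in the real code)
	 * swaps in the wrapped hook before it is invoked at boot. */
	smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
	smp_ops.smp_prepare_boot_cpu();
	return 0;
}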