
clocksource/drivers/hyper-v: Untangle stimers and timesync from clocksources

hyperv_timer.c exports hyperv_cs, which is used by stimers and the
timesync mechanism.  However, the clocksource dependency is not
needed: these mechanisms only depend on the partition reference
counter (which can be read via an MSR or via the TSC Reference Page).

Introduce the function pointer hv_read_reference_counter, which
encapsulates the partition reference counter read, and export it in
place of the hyperv_cs pointer; the latter can then be removed.

This should clarify that there's no relationship between Hyper-V
stimers & timesync and the Linux clocksource abstractions.  No
functional or semantic change.

Suggested-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Link: https://lore.kernel.org/r/20200109160650.16150-2-parri.andrea@gmail.com
Andrea Parri 2020-01-09 17:06:49 +01:00 committed by Daniel Lezcano
parent b9c60a741f
commit 0af3e137c1
3 changed files with 28 additions and 18 deletions
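
For illustration only (not part of the commit): a minimal, self-contained C sketch of the new read path, modeled on the diff below. Only the names hv_read_reference_counter, hyperv_cs, read_hv_clock_tsc and read_hv_clock_msr come from the patch; the typedef, the stubbed return values and the main()/printf() scaffolding are stand-ins so the sketch compiles on its own.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/*
 * The exported hook: a bare function pointer returning the partition
 * reference counter, with no struct clocksource involved.
 */
u64 (*hv_read_reference_counter)(void);

/* Stand-in backends; the real ones read the TSC reference page or an MSR. */
static u64 read_hv_clock_tsc(void) { return 1000; }
static u64 read_hv_clock_msr(void) { return 2000; }

int main(void)
{
	/* Init code picks one backend; the TSC reference page is preferred. */
	hv_read_reference_counter = read_hv_clock_tsc;

	/*
	 * Consumers (stimer arming, timesync) call the pointer directly:
	 *   old: current_tick = hyperv_cs->read(NULL);
	 *   new: current_tick = hv_read_reference_counter();
	 */
	printf("tsc-page backend: %llu\n",
	       (unsigned long long)hv_read_reference_counter());

	/* MSR fallback, as when the TSC reference page is unavailable. */
	hv_read_reference_counter = read_hv_clock_msr;
	printf("msr backend:      %llu\n",
	       (unsigned long long)hv_read_reference_counter());
	return 0;
}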

drivers/clocksource/hyperv_timer.c

@@ -66,7 +66,7 @@ static int hv_ce_set_next_event(unsigned long delta,
 {
 	u64 current_tick;
 
-	current_tick = hyperv_cs->read(NULL);
+	current_tick = hv_read_reference_counter();
 	current_tick += delta;
 	hv_init_timer(0, current_tick);
 	return 0;
@@ -304,8 +304,8 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
  * Hyper-V and 32-bit x86. The TSC reference page version is preferred.
  */
 
-struct clocksource *hyperv_cs;
-EXPORT_SYMBOL_GPL(hyperv_cs);
+u64 (*hv_read_reference_counter)(void);
+EXPORT_SYMBOL_GPL(hv_read_reference_counter);
 
 static union {
 	struct ms_hyperv_tsc_page page;
@@ -318,7 +318,7 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
+static u64 notrace read_hv_clock_tsc(void)
 {
 	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
 
@@ -328,9 +328,14 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
 	return current_tick;
 }
 
+static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
+{
+	return read_hv_clock_tsc();
+}
+
 static u64 read_hv_sched_clock_tsc(void)
 {
-	return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
+	return read_hv_clock_tsc() - hv_sched_clock_offset;
 }
 
 static void suspend_hv_clock_tsc(struct clocksource *arg)
@@ -359,14 +364,14 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
 static struct clocksource hyperv_cs_tsc = {
 	.name = "hyperv_clocksource_tsc_page",
 	.rating = 400,
-	.read = read_hv_clock_tsc,
+	.read = read_hv_clock_tsc_cs,
 	.mask = CLOCKSOURCE_MASK(64),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 	.suspend= suspend_hv_clock_tsc,
 	.resume = resume_hv_clock_tsc,
 };
 
-static u64 notrace read_hv_clock_msr(struct clocksource *arg)
+static u64 notrace read_hv_clock_msr(void)
 {
 	u64 current_tick;
 	/*
@@ -378,15 +383,20 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
 	return current_tick;
 }
 
+static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
+{
+	return read_hv_clock_msr();
+}
+
 static u64 read_hv_sched_clock_msr(void)
 {
-	return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
+	return read_hv_clock_msr() - hv_sched_clock_offset;
 }
 
 static struct clocksource hyperv_cs_msr = {
 	.name = "hyperv_clocksource_msr",
 	.rating = 400,
-	.read = read_hv_clock_msr,
+	.read = read_hv_clock_msr_cs,
 	.mask = CLOCKSOURCE_MASK(64),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -399,7 +409,7 @@ static bool __init hv_init_tsc_clocksource(void)
 	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
 		return false;
 
-	hyperv_cs = &hyperv_cs_tsc;
+	hv_read_reference_counter = read_hv_clock_tsc;
 	phys_addr = virt_to_phys(hv_get_tsc_page());
 
 	/*
@@ -417,7 +427,7 @@ static bool __init hv_init_tsc_clocksource(void)
 	hv_set_clocksource_vdso(hyperv_cs_tsc);
 	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
-	hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+	hv_sched_clock_offset = hv_read_reference_counter();
 	hv_setup_sched_clock(read_hv_sched_clock_tsc);
 
 	return true;
@@ -439,10 +449,10 @@ void __init hv_init_clocksource(void)
 	if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
 		return;
 
-	hyperv_cs = &hyperv_cs_msr;
+	hv_read_reference_counter = read_hv_clock_msr;
 	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
 
-	hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+	hv_sched_clock_offset = hv_read_reference_counter();
 	hv_setup_sched_clock(read_hv_sched_clock_msr);
 }
 EXPORT_SYMBOL_GPL(hv_init_clocksource);
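
A side note on the wrapper shape in the hunks above, reduced to a standalone, compilable C sketch: the raw read helper drops its unused struct clocksource argument so stimers and timesync can call it directly, while a thin *_cs adapter keeps the signature the clocksource core expects. Only read_hv_clock_tsc, read_hv_clock_tsc_cs, hyperv_cs_tsc and the .read member come from the patch; the local struct clocksource, the return value 42 and main() are stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Local stand-in for the kernel's clocksource .read callback signature. */
struct clocksource {
	u64 (*read)(struct clocksource *cs);
};

/* Raw read: no clocksource argument, so consumers can call it directly. */
static u64 read_hv_clock_tsc(void)
{
	return 42;	/* placeholder for the TSC-reference-page read */
}

/* Thin adapter that preserves the signature the clocksource core expects. */
static u64 read_hv_clock_tsc_cs(struct clocksource *arg)
{
	(void)arg;	/* unused: the underlying read needs no clocksource */
	return read_hv_clock_tsc();
}

static struct clocksource hyperv_cs_tsc = {
	.read = read_hv_clock_tsc_cs,
};

int main(void)
{
	/* Both paths return the same counter value. */
	printf("%llu %llu\n",
	       (unsigned long long)hyperv_cs_tsc.read(&hyperv_cs_tsc),
	       (unsigned long long)read_hv_clock_tsc());
	return 0;
}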

drivers/hv/hv_util.c

@@ -211,7 +211,7 @@ static struct timespec64 hv_get_adj_host_time(void)
 	unsigned long flags;
 
 	spin_lock_irqsave(&host_ts.lock, flags);
-	reftime = hyperv_cs->read(hyperv_cs);
+	reftime = hv_read_reference_counter();
 	newtime = host_ts.host_time + (reftime - host_ts.ref_time);
 	ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
 	spin_unlock_irqrestore(&host_ts.lock, flags);
@@ -250,7 +250,7 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
 	 */
 	spin_lock_irqsave(&host_ts.lock, flags);
 
-	cur_reftime = hyperv_cs->read(hyperv_cs);
+	cur_reftime = hv_read_reference_counter();
 	host_ts.host_time = hosttime;
 	host_ts.ref_time = cur_reftime;
 
@@ -315,7 +315,7 @@ static void timesync_onchannelcallback(void *context)
 					sizeof(struct vmbuspipe_hdr) +
 					sizeof(struct icmsg_hdr)];
 				adj_guesttime(timedatap->parenttime,
-					      hyperv_cs->read(hyperv_cs),
+					      hv_read_reference_counter(),
 					      timedatap->flags);
 			}
 		}
@@ -524,7 +524,7 @@ static struct ptp_clock *hv_ptp_clock;
 static int hv_timesync_init(struct hv_util_service *srv)
 {
 	/* TimeSync requires Hyper-V clocksource. */
-	if (!hyperv_cs)
+	if (!hv_read_reference_counter)
 		return -ENODEV;
 
 	spin_lock_init(&host_ts.lock);

include/clocksource/hyperv_timer.h

@@ -30,7 +30,7 @@ extern void hv_stimer_global_cleanup(void);
 extern void hv_stimer0_isr(void);
 
 #ifdef CONFIG_HYPERV_TIMER
-extern struct clocksource *hyperv_cs;
+extern u64 (*hv_read_reference_counter)(void);
 extern void hv_init_clocksource(void);
 
 extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);