KVM: arm64: timers: Move timer registers to the sys_regs file

Move the timer registers to the sysreg file. This will further help when
they are directly changed by a nesting hypervisor in the VNCR page.

This requires moving the initialisation of the timer struct so that some
of the helpers (such as arch_timer_ctx_index) can work correctly at an
early stage.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Marc Zyngier 2019-06-28 15:23:43 +01:00
parent 3c5ff0c60f
commit 41ce82f63c
4 changed files with 136 additions and 44 deletions
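
For context, here is a minimal, self-contained sketch of the indirection this commit introduces: the timer registers now live in the vcpu's sys_regs[] array and are reached through small accessors keyed by the timer's index, instead of being fields of the timer context itself. The types, enum values and the __vcpu_sys_reg() helper below are simplified stand-ins for illustration, not the kernel's definitions.

/* Illustration only: names mirror the patch, types are simplified fakes. */
#include <stdint.h>
#include <stdio.h>

enum vcpu_sysreg { CNTVOFF_EL2, CNTV_CVAL_EL0, CNTV_CTL_EL0,
                   CNTP_CVAL_EL0, CNTP_CTL_EL0, NR_SYS_REGS };
enum kvm_timers { TIMER_VTIMER, TIMER_PTIMER };

struct kvm_vcpu { uint64_t sys_regs[NR_SYS_REGS]; };
struct arch_timer_context {
	struct kvm_vcpu *vcpu;		/* back-pointer, set at vcpu init */
	enum kvm_timers index;
};

/* Stand-in for the kernel's __vcpu_sys_reg(): an lvalue into sys_regs[]. */
#define __vcpu_sys_reg(v, r) ((v)->sys_regs[(r)])

static uint64_t timer_get_cval(struct arch_timer_context *ctxt)
{
	/* Which sys_regs[] slot backs the value depends on the timer. */
	switch (ctxt->index) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(ctxt->vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(ctxt->vcpu, CNTP_CVAL_EL0);
	}
	return 0;
}

int main(void)
{
	struct kvm_vcpu vcpu = { {0} };
	struct arch_timer_context vtimer = { &vcpu, TIMER_VTIMER };

	__vcpu_sys_reg(&vcpu, CNTV_CVAL_EL0) = 42;	/* write the slot */
	printf("cval = %llu\n", (unsigned long long)timer_get_cval(&vtimer));
	return 0;
}

Note that the accessors dereference ctxt->vcpu, which is why the diff below also moves the vtimer->vcpu/ptimer->vcpu assignments earlier in kvm_timer_vcpu_init(): the back-pointer must be valid before the first timer_set_offset() call.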

@@ -189,6 +189,12 @@ enum vcpu_sysreg {
 	SP_EL1,
 	SPSR_EL1,
 
+	CNTVOFF_EL2,
+	CNTV_CVAL_EL0,
+	CNTV_CTL_EL0,
+	CNTP_CVAL_EL0,
+	CNTP_CTL_EL0,
+
 	/* 32bit specific registers. Keep them at the end of the range */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */

@@ -51,6 +51,93 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 			      struct arch_timer_context *timer,
 			      enum kvm_arch_timer_regs treg);
 
+u32 timer_get_ctl(struct arch_timer_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+	case TIMER_PTIMER:
+		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+u64 timer_get_cval(struct arch_timer_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+	case TIMER_PTIMER:
+		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+	default:
+		return 0;
+	}
+}
+
+static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+		break;
+	case TIMER_PTIMER:
+		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+		break;
+	case TIMER_PTIMER:
+		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+		break;
+	default:
+		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+	}
+}
+
 u64 kvm_phys_timer_read(void)
 {
 	return timecounter->cc->read(timecounter->cc);
@@ -124,8 +211,8 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
 	u64 cval, now;
 
-	cval = timer_ctx->cnt_cval;
-	now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+	cval = timer_get_cval(timer_ctx);
+	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
 
 	if (now < cval) {
 		u64 ns;
@@ -144,8 +231,8 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 {
 	WARN_ON(timer_ctx && timer_ctx->loaded);
 	return timer_ctx &&
-		!(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+	       ((timer_get_ctl(timer_ctx) &
+		 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
 }
 
 /*
@@ -256,8 +343,8 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 	if (!kvm_timer_irq_can_fire(timer_ctx))
 		return false;
 
-	cval = timer_ctx->cnt_cval;
-	now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+	cval = timer_get_cval(timer_ctx);
+	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
 
 	return cval <= now;
 }
@@ -350,8 +437,8 @@ static void timer_save_state(struct arch_timer_context *ctx)
 
 	switch (index) {
 	case TIMER_VTIMER:
-		ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
-		ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
+		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
+		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
 
 		/* Disable the timer */
 		write_sysreg_el0(0, SYS_CNTV_CTL);
@@ -359,8 +446,8 @@ static void timer_save_state(struct arch_timer_context *ctx)
 
 		break;
 	case TIMER_PTIMER:
-		ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
-		ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
+		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
+		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
 
 		/* Disable the timer */
 		write_sysreg_el0(0, SYS_CNTP_CTL);
@@ -429,14 +516,14 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 
 	switch (index) {
 	case TIMER_VTIMER:
-		write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
+		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
 		isb();
-		write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
+		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
 		break;
 	case TIMER_PTIMER:
-		write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
+		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
 		isb();
-		write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
+		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
 		break;
 	case NR_KVM_TIMERS:
 		BUG();
@@ -528,7 +615,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 		kvm_timer_vcpu_load_nogic(vcpu);
 	}
 
-	set_cntvoff(map.direct_vtimer->cntvoff);
+	set_cntvoff(timer_get_offset(map.direct_vtimer));
 
 	kvm_timer_unblocking(vcpu);
@@ -639,8 +726,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 	 * resets the timer to be disabled and unmasked and is compliant with
 	 * the ARMv7 architecture.
 	 */
-	vcpu_vtimer(vcpu)->cnt_ctl = 0;
-	vcpu_ptimer(vcpu)->cnt_ctl = 0;
+	timer_set_ctl(vcpu_vtimer(vcpu), 0);
+	timer_set_ctl(vcpu_ptimer(vcpu), 0);
 
 	if (timer->enabled) {
 		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
@@ -668,13 +755,13 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
 	mutex_lock(&kvm->lock);
 	kvm_for_each_vcpu(i, tmp, kvm)
-		vcpu_vtimer(tmp)->cntvoff = cntvoff;
+		timer_set_offset(vcpu_vtimer(tmp), cntvoff);
 
 	/*
 	 * When called from the vcpu create path, the CPU being created is not
 	 * included in the loop above, so we just set it here as well.
 	 */
-	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
 	mutex_unlock(&kvm->lock);
 }
@@ -684,9 +771,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
+	vtimer->vcpu = vcpu;
+	ptimer->vcpu = vcpu;
+
 	/* Synchronize cntvoff across all vtimers of a VM. */
 	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
-	ptimer->cntvoff = 0;
+	timer_set_offset(ptimer, 0);
 
 	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	timer->bg_timer.function = kvm_bg_timer_expire;
@@ -704,9 +794,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
 	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
-
-	vtimer->vcpu = vcpu;
-	ptimer->vcpu = vcpu;
 }
 
 static void kvm_timer_init_interrupt(void *info)
@@ -756,10 +843,12 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
 	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
 	 * regardless of ENABLE bit for our implementation convenience.
 	 */
+	u32 ctl = timer_get_ctl(timer);
+
 	if (!kvm_timer_compute_delta(timer))
-		return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
-	else
-		return timer->cnt_ctl;
+		ctl |= ARCH_TIMER_CTRL_IT_STAT;
+
+	return ctl;
 }
 
 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
@@ -795,8 +884,8 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 
 	switch (treg) {
 	case TIMER_REG_TVAL:
-		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
-		val &= lower_32_bits(val);
+		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
+		val = lower_32_bits(val);
 		break;
 
 	case TIMER_REG_CTL:
@@ -804,11 +893,11 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 		break;
 
 	case TIMER_REG_CVAL:
-		val = timer->cnt_cval;
+		val = timer_get_cval(timer);
 		break;
 
 	case TIMER_REG_CNT:
-		val = kvm_phys_timer_read() - timer->cntvoff;
+		val = kvm_phys_timer_read() - timer_get_offset(timer);
 		break;
 
 	default:
@@ -842,15 +931,15 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
 	switch (treg) {
 	case TIMER_REG_TVAL:
-		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
+		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
 		break;
 
 	case TIMER_REG_CTL:
-		timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
+		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
 		break;
 
 	case TIMER_REG_CVAL:
-		timer->cnt_cval = val;
+		timer_set_cval(timer, val);
 		break;
 
 	default:

@@ -301,8 +301,8 @@ TRACE_EVENT(kvm_timer_save_state,
 	),
 
 	TP_fast_assign(
-		__entry->ctl = ctx->cnt_ctl;
-		__entry->cval = ctx->cnt_cval;
+		__entry->ctl = timer_get_ctl(ctx);
+		__entry->cval = timer_get_cval(ctx);
 		__entry->timer_idx = arch_timer_ctx_index(ctx);
 	),
@@ -323,8 +323,8 @@ TRACE_EVENT(kvm_timer_restore_state,
 	),
 
 	TP_fast_assign(
-		__entry->ctl = ctx->cnt_ctl;
-		__entry->cval = ctx->cnt_cval;
+		__entry->ctl = timer_get_ctl(ctx);
+		__entry->cval = timer_get_cval(ctx);
 		__entry->timer_idx = arch_timer_ctx_index(ctx);
 	),

@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs {
 
 struct arch_timer_context {
 	struct kvm_vcpu *vcpu;
 
-	/* Registers: control register, timer value */
-	u32 cnt_ctl;
-	u64 cnt_cval;
-
 	/* Timer IRQ */
 	struct kvm_irq_level irq;
 
-	/* Virtual offset */
-	u64 cntvoff;
-
 	/* Emulated Timer (may be unused) */
 	struct hrtimer hrtimer;
@@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
 				enum kvm_arch_timer_regs treg,
 				u64 val);
 
+/* Needed for tracing */
+u32 timer_get_ctl(struct arch_timer_context *ctxt);
+u64 timer_get_cval(struct arch_timer_context *ctxt);
+
 #endif