
KVM: arm/arm64: Set a background timer to the earliest timer expiration

When scheduling a background timer, consider both the virtual and the
physical timer and pick the earliest expiration time.

Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Jintack Lim 2017-02-03 10:20:05 -05:00 committed by Marc Zyngier
parent 58e0c9732a
commit fb280e9757
2 changed files with 42 additions and 14 deletions
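
For readers skimming the diff below: the rule the patch introduces is to arm the background timer for whichever guest timer (virtual or physical) can fire and expires first, treating "nothing can fire" as 0. The following standalone user-space sketch only illustrates that selection rule; the helper name pick_earliest and its parameters are invented for the example and are not part of the patch.

#include <stdio.h>
#include <limits.h>

/*
 * Return the earliest expiration (in ns) among two guest timers,
 * ignoring any timer that cannot raise an interrupt. If neither
 * timer can fire, return 0 -- the same convention the patch's
 * kvm_timer_earliest_exp() uses.
 */
static unsigned long long pick_earliest(int virt_can_fire, unsigned long long virt_ns,
					int phys_can_fire, unsigned long long phys_ns)
{
	unsigned long long min_virt = virt_can_fire ? virt_ns : ULLONG_MAX;
	unsigned long long min_phys = phys_can_fire ? phys_ns : ULLONG_MAX;

	if (min_virt == ULLONG_MAX && min_phys == ULLONG_MAX)
		return 0;

	return min_virt < min_phys ? min_virt : min_phys;
}

int main(void)
{
	/* vtimer due in 3 ms, ptimer in 1 ms: sleep at most 1 ms */
	printf("%llu\n", pick_earliest(1, 3000000ULL, 1, 1000000ULL));
	/* only the vtimer can fire: its expiry wins, 3 ms */
	printf("%llu\n", pick_earliest(1, 3000000ULL, 0, 1000000ULL));
	/* neither timer can fire: 0, no background timer is needed */
	printf("%llu\n", pick_earliest(0, 0, 0, 0));
	return 0;
}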

View File

@@ -300,7 +300,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return kvm_timer_should_fire(vcpu_vtimer(vcpu));
+	return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
+	       kvm_timer_should_fire(vcpu_ptimer(vcpu));
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)

View File

@@ -118,6 +118,35 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 	return 0;
 }
 
+static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+{
+	return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
+		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+}
+
+/*
+ * Returns the earliest expiration time in ns among guest timers.
+ * Note that it will return 0 if none of timers can fire.
+ */
+static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
+{
+	u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+
+	if (kvm_timer_irq_can_fire(vtimer))
+		min_virt = kvm_timer_compute_delta(vtimer);
+
+	if (kvm_timer_irq_can_fire(ptimer))
+		min_phys = kvm_timer_compute_delta(ptimer);
+
+	/* If none of timers can fire, then return 0 */
+	if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
+		return 0;
+
+	return min(min_virt, min_phys);
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_cpu *timer;
@@ -132,7 +161,7 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 	 * PoV (NTP on the host may have forced it to expire
 	 * early). If we should have slept longer, restart it.
 	 */
-	ns = kvm_timer_compute_delta(vcpu_vtimer(vcpu));
+	ns = kvm_timer_earliest_exp(vcpu);
 	if (unlikely(ns)) {
 		hrtimer_forward_now(hrt, ns_to_ktime(ns));
 		return HRTIMER_RESTART;
@@ -142,12 +171,6 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 	return HRTIMER_NORESTART;
 }
 
-static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
-{
-	return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
-}
-
 bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 {
 	u64 cval, now;
@@ -215,26 +238,30 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
 	BUG_ON(timer_is_armed(timer));
 
 	/*
-	 * No need to schedule a background timer if the guest timer has
+	 * No need to schedule a background timer if any guest timer has
 	 * already expired, because kvm_vcpu_block will return before putting
 	 * the thread to sleep.
 	 */
-	if (kvm_timer_should_fire(vtimer))
+	if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
 		return;
 
 	/*
-	 * If the timer is not capable of raising interrupts (disabled or
+	 * If both timers are not capable of raising interrupts (disabled or
 	 * masked), then there's no more work for us to do.
 	 */
-	if (!kvm_timer_irq_can_fire(vtimer))
+	if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
 		return;
 
-	/* The timer has not yet expired, schedule a background timer */
-	timer_arm(timer, kvm_timer_compute_delta(vtimer));
+	/*
+	 * The guest timers have not yet expired, schedule a background timer.
+	 * Set the earliest expiration time among the guest timers.
+	 */
+	timer_arm(timer, kvm_timer_earliest_exp(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
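
For context on the timer_arm() call in the last hunk: that helper is not touched by this patch. Assuming it looks roughly like the arch_timer code of this era, it simply marks the background hrtimer as armed and starts it so that it fires after the computed number of nanoseconds; the body below is a sketch under that assumption, not part of this diff.

/* Sketch (assumption, not part of this diff): how the armed value is consumed. */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	/* Remember that a background hrtimer is pending for this vcpu... */
	timer->armed = true;
	/* ...and run the hrtimer callback (kvm_timer_expire) once the
	 * earliest guest timer is due, i.e. "ns" nanoseconds from now. */
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}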