Merge tag 'kvm-s390-next-20150812' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: fix and feature for kvm/next (4.3)

1. Error handling for irq routes: propagate the return value of
   kvm_set_irq_routing() instead of unconditionally reporting success
2. Gracefully handle STP time changes
   s390 supports the STP protocol for synchronizing the TOD clocks of
   different systems: it steers the clocks so that all participants stay
   within the round-trip time between the systems. On specific
   out-of-sync events, Linux can opt in to accept sync checks. These
   cause non-monotonic jumps of the TOD clock, which Linux corrects via
   time offsets to keep the wall-clock time monotonic. KVM guests also
   base their time on the host TOD, so we need to fix up the offset for
   them as well; a sketch of the resulting arithmetic follows below.
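
The fix in (2) is plain offset arithmetic: a guest's TOD is computed as
host TOD + epoch, and the new kvm_clock_sync() notifier (see the
kvm-s390.c hunk below) subtracts the observed host delta from every
epoch. Here is a minimal user-space sketch of that invariant; the names
host_tod/guest_tod and all numbers are made up for illustration, and
this is not kernel code:

    #include <stdio.h>

    typedef unsigned long long u64;

    static u64 host_tod;                 /* simulated host TOD clock */

    /* as in KVM: guest time = host TOD + per-guest offset (epoch) */
    static u64 guest_tod(u64 epoch)
    {
            return host_tod + epoch;
    }

    int main(void)
    {
            u64 epoch = 5000;            /* kvm->arch.epoch analogue */
            u64 delta = 4096;            /* jump from the sync check */

            host_tod = 100000;
            printf("before sync check: %llu\n", guest_tod(epoch));

            host_tod += delta;           /* host TOD jumps forward */
            epoch -= delta;              /* notifier compensates */
            printf("after fixup:       %llu\n", guest_tod(epoch));
            return 0;
    }

Both printf() calls print 105000; without the epoch adjustment the
guest would observe the full 4096-unit jump in its TOD.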
Commit ae6c0aa6ae, merged by Paolo Bonzini, 2015-08-13 11:51:50 +02:00
5 changed files with 66 additions and 6 deletions

arch/s390/include/asm/etr.h

@@ -214,6 +214,9 @@ static inline int etr_ptff(void *ptff_block, unsigned int func)
 void etr_switch_to_local(void);
 void etr_sync_check(void);
 
+/* notifier for syncs */
+extern struct atomic_notifier_head s390_epoch_delta_notifier;
+
 /* STP interruption parameter */
 struct stp_irq_parm {
         unsigned int _pad0 : 14;

arch/s390/kernel/time.c

@@ -58,6 +58,9 @@ EXPORT_SYMBOL_GPL(sched_clock_base_cc);
 
 static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
+ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
+EXPORT_SYMBOL(s390_epoch_delta_notifier);
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -752,7 +755,7 @@ static void clock_sync_cpu(struct clock_sync_data *sync)
 static int etr_sync_clock(void *data)
 {
         static int first;
-        unsigned long long clock, old_clock, delay, delta;
+        unsigned long long clock, old_clock, clock_delta, delay, delta;
         struct clock_sync_data *etr_sync;
         struct etr_aib *sync_port, *aib;
         int port;
@@ -789,6 +792,9 @@ static int etr_sync_clock(void *data)
                 delay = (unsigned long long)
                         (aib->edf2.etv - sync_port->edf2.etv) << 32;
                 delta = adjust_time(old_clock, clock, delay);
+                clock_delta = clock - old_clock;
+                atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0,
+                                           &clock_delta);
                 etr_sync->fixup_cc = delta;
                 fixup_clock_comparator(delta);
                 /* Verify that the clock is properly set. */
@@ -1526,7 +1532,7 @@ void stp_island_check(void)
 static int stp_sync_clock(void *data)
 {
         static int first;
-        unsigned long long old_clock, delta;
+        unsigned long long old_clock, delta, new_clock, clock_delta;
         struct clock_sync_data *stp_sync;
         int rc;
 
@@ -1551,7 +1557,11 @@ static int stp_sync_clock(void *data)
         old_clock = get_tod_clock();
         rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
         if (rc == 0) {
-                delta = adjust_time(old_clock, get_tod_clock(), 0);
+                new_clock = get_tod_clock();
+                delta = adjust_time(old_clock, new_clock, 0);
+                clock_delta = new_clock - old_clock;
+                atomic_notifier_call_chain(&s390_epoch_delta_notifier,
+                                           0, &clock_delta);
                 fixup_clock_comparator(delta);
                 rc = chsc_sstpi(stp_page, &stp_info,
                                 sizeof(struct stp_sstpi));

arch/s390/kvm/interrupt.c

@@ -71,9 +71,13 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
+        preempt_disable();
         if (!(vcpu->arch.sie_block->ckc <
-              get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+              get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
+                preempt_enable();
                 return 0;
+        }
+        preempt_enable();
         return ckc_interrupts_enabled(vcpu);
 }
 
@@ -856,7 +860,9 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                 goto no_timer;
         }
 
+        preempt_disable();
         now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+        preempt_enable();
         sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
         /* underflow */
@@ -895,7 +901,9 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
         u64 now, sltime;
 
         vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+        preempt_disable();
         now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+        preempt_enable();
         sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
         /*

arch/s390/kvm/kvm-s390.c

@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <asm/asm-offsets.h>
 #include <asm/lowcore.h>
+#include <asm/etr.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
@@ -138,16 +139,47 @@ int kvm_arch_hardware_enable(void)
 
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
 
+/*
+ * This callback is executed during stop_machine(). All CPUs are therefore
+ * temporarily stopped. In order not to change guest behavior, we have to
+ * disable preemption whenever we touch the epoch of kvm and the VCPUs,
+ * so a CPU won't be stopped while calculating with the epoch.
+ */
+static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
+                          void *v)
+{
+        struct kvm *kvm;
+        struct kvm_vcpu *vcpu;
+        int i;
+        unsigned long long *delta = v;
+
+        list_for_each_entry(kvm, &vm_list, vm_list) {
+                kvm->arch.epoch -= *delta;
+                kvm_for_each_vcpu(i, vcpu, kvm) {
+                        vcpu->arch.sie_block->epoch -= *delta;
+                }
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_clock_notifier = {
+        .notifier_call = kvm_clock_sync,
+};
+
 int kvm_arch_hardware_setup(void)
 {
         gmap_notifier.notifier_call = kvm_gmap_notifier;
         gmap_register_ipte_notifier(&gmap_notifier);
+        atomic_notifier_chain_register(&s390_epoch_delta_notifier,
+                                       &kvm_clock_notifier);
         return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
 {
         gmap_unregister_ipte_notifier(&gmap_notifier);
+        atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
+                                         &kvm_clock_notifier);
 }
 
 int kvm_arch_init(void *opaque)
@@ -501,11 +533,13 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
                 return r;
 
         mutex_lock(&kvm->lock);
+        preempt_disable();
         kvm->arch.epoch = gtod - host_tod;
         kvm_s390_vcpu_block_all(kvm);
         kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
         kvm_s390_vcpu_unblock_all(kvm);
+        preempt_enable();
         mutex_unlock(&kvm->lock);
         VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
         return 0;
@@ -553,7 +587,9 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
         if (r)
                 return r;
 
+        preempt_disable();
         gtod = host_tod + kvm->arch.epoch;
+        preempt_enable();
         if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                 return -EFAULT;
         VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
@@ -926,8 +962,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                 if (kvm->arch.use_irqchip) {
                         /* Set up dummy routing. */
                         memset(&routing, 0, sizeof(routing));
-                        kvm_set_irq_routing(kvm, &routing, 0, 0);
-                        r = 0;
+                        r = kvm_set_irq_routing(kvm, &routing, 0, 0);
                 }
                 break;
         }
@@ -1314,7 +1349,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
         mutex_lock(&vcpu->kvm->lock);
+        preempt_disable();
         vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+        preempt_enable();
         mutex_unlock(&vcpu->kvm->lock);
         if (!kvm_is_ucontrol(vcpu->kvm))
                 vcpu->arch.gmap = vcpu->kvm->arch.gmap;

arch/s390/kvm/priv.c

@@ -57,8 +57,10 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
         val = (val - hostclk) & ~0x3fUL;
 
         mutex_lock(&vcpu->kvm->lock);
+        preempt_disable();
         kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                 cpup->arch.sie_block->epoch = val;
+        preempt_enable();
         mutex_unlock(&vcpu->kvm->lock);
 
         kvm_s390_set_psw_cc(vcpu, 0);