
KVM: PPC: Book3S HV: Add missing barriers to XIVE code and document them

This adds missing memory barriers to order updates/tests of
the virtual CPPR and MFRR, thus fixing a lost IPI problem.

While at it also document all barriers in this file.

This fixes a bug causing guest IPIs to occasionally get lost.  The
symptom then is hangs or stalls in the guest.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Benjamin Herrenschmidt 2017-08-18 12:10:58 +10:00 committed by Paul Mackerras
parent 2c4fb78f78
commit bb9b52bd51
1 changed file with 55 additions and 2 deletions
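The race described above is the classic store-buffering pattern: the H_IPI path publishes a new MFRR and then checks the CPPR, while the H_CPPR/H_EOI path publishes a new CPPR and then checks the MFRR. If neither store is ordered before the other side's load, both checks can read stale values and nobody delivers the IPI. The user-space C11 sketch below only illustrates that pattern; the names h_ipi_side, h_cppr_side, ipi_shot and ipi_pushed are invented for the demo, the seq_cst fences merely stand in for the mb()/smp_mb() added by this patch, and a single run will rarely exhibit the reordering (a real litmus harness would loop many times). It is not the kernel code.

/* Illustrative store-buffering sketch of the lost-IPI race (not kernel code). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define USE_FENCES 1             /* 0: racy, 1: barriers as in this patch */

static atomic_uchar cppr = 0;    /* low CPPR: vcpu currently masks a prio-3 IPI */
static atomic_uchar mfrr = 0xff; /* 0xff: no IPI pending                        */
static int ipi_shot, ipi_pushed;

/* H_IPI-like side: publish the new MFRR, then check CPPR to shoot the IPI. */
static void *h_ipi_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&mfrr, 3, memory_order_relaxed);
#if USE_FENCES
	atomic_thread_fence(memory_order_seq_cst);   /* plays the role of mb() */
#endif
	if (3 < atomic_load_explicit(&cppr, memory_order_relaxed))
		ipi_shot = 1;                        /* would hit the trigger page */
	return NULL;
}

/* H_CPPR-like side: raise CPPR (mask less), then re-check MFRR for a pending IPI. */
static void *h_cppr_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&cppr, 0xff, memory_order_relaxed);
#if USE_FENCES
	atomic_thread_fence(memory_order_seq_cst);   /* plays the role of smp_mb() */
#endif
	if (atomic_load_explicit(&mfrr, memory_order_relaxed) < 0xff)
		ipi_pushed = 1;                      /* would push the IPI to HW */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, h_ipi_side, NULL);
	pthread_create(&b, NULL, h_cppr_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Without the fences both loads may see the old values: the IPI is lost. */
	printf("IPI delivered: %s\n", (ipi_shot || ipi_pushed) ? "yes" : "LOST");
	return 0;
}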


@@ -16,6 +16,12 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
u8 cppr;
u16 ack;
/*
* Ensure any previous store to CPPR is ordered vs.
* the subsequent loads from PIPR or ACK.
*/
eieio();
/*
* DD1 bug workaround: If PIPR is less favored than CPPR
* ignore the interrupt or we might incorrectly lose an IPB
@@ -244,6 +250,11 @@ skip_ipi:
/*
* If we found an interrupt, adjust what the guest CPPR should
* be as if we had just fetched that interrupt from HW.
*
* Note: This can only make xc->cppr smaller as the previous
* loop will only exit with hirq != 0 if prio is lower than
* the current xc->cppr. Thus we don't need to re-check xc->mfrr
* for pending IPIs.
*/
if (hirq)
xc->cppr = prio;
@@ -389,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
old_cppr = xc->cppr;
xc->cppr = cppr;
/*
* Order the above update of xc->cppr with the subsequent
* read of xc->mfrr inside push_pending_to_hw()
*/
smp_mb();
/*
* We are masking less, we need to look for pending things
* to deliver and set VP pending bits accordingly to trigger
@@ -429,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
* used to signal MFRR changes is EOId when fetched from
* the queue.
*/
if (irq == XICS_IPI || irq == 0)
if (irq == XICS_IPI || irq == 0) {
/*
* This barrier orders the setting of xc->cppr vs.
* subsequent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
goto bail;
}
/* Find interrupt source */
sb = kvmppc_xive_find_source(xive, irq, &src);
if (!sb) {
pr_devel(" source not found !\n");
rc = H_PARAMETER;
/* Same as above */
smp_mb();
goto bail;
}
state = &sb->irq_state[src];
kvmppc_xive_select_irq(state, &hw_num, &xd);
state->in_eoi = true;
mb();
/*
* This barrier orders both the setting of in_eoi above vs.
* the subsequent test of guest_priority, and the setting
* of xc->cppr vs. the subsequent test of xc->mfrr done inside
* scan_interrupts and push_pending_to_hw
*/
smp_mb();
again:
if (state->guest_priority == MASKED) {
@@ -470,6 +503,14 @@ again:
}
/*
* This barrier orders the above guest_priority check
* and spin_lock/unlock with clearing in_eoi below.
*
* It also has to be a full mb() as it must ensure
* the MMIOs done in source_eoi() are completed before
* state->in_eoi is visible.
*/
mb();
state->in_eoi = false;
bail:
@@ -504,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
/* Locklessly write over MFRR */
xc->mfrr = mfrr;
/*
* The load of xc->cppr below and the subsequent MMIO store
* to the IPI must happen after the above mfrr update is
* globally visible so that:
*
* - Synchronize with another CPU doing an H_EOI or an H_CPPR
* updating xc->cppr then reading xc->mfrr.
*
* - The target of the IPI sees the xc->mfrr update
*/
mb();
/* Shoot the IPI if more favored than the target CPPR */
if (mfrr < xc->cppr)
__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
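For reference, the pairing that the new barriers establish can be summarized as below. This is a condensed paraphrase of the hunks above, not additional kernel code, and the two-column layout is only illustrative.

/*
 * Barrier pairing introduced by this patch (paraphrased from the diff):
 *
 *   H_IPI side                        H_CPPR / H_EOI side
 *   ----------                        -------------------
 *   xc->mfrr = mfrr;                  xc->cppr = cppr;
 *   mb();                             smp_mb();
 *   if (mfrr < xc->cppr)              scan_interrupts() /
 *           shoot IPI (MMIO store);       push_pending_to_hw()
 *                                         (both read xc->mfrr)
 *
 * Both sides store first and load second with a full barrier in between,
 * so at least one of them observes the other's store and the IPI is
 * delivered. Separately, the eieio() in ack_pending() orders the CPPR
 * store vs. the PIPR/ACK loads, and the mb() before clearing in_eoi
 * ensures the MMIOs done in source_eoi() complete before in_eoi is
 * seen as false.
 */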