KVM: arm64: pmu: Fix chained SW_INCR counters
commit aa76829171 upstream (branch 5.4-rM2-2.2.x-imx-squashed).

At the moment a SW_INCR counter always overflows on the 32-bit boundary, independently of whether the n+1th counter is programmed as CHAIN. Check whether the SW_INCR counter is a 64-bit counter and, if so, implement the 64-bit logic.

Fixes: 80f393a23b ("KVM: arm/arm64: Support chained PMU counters")
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200124142535.29386-4-eric.auger@redhat.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent
a6229d1b5c
commit
a17d216404
|
@ -480,28 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
|
||||||
*/
|
*/
|
||||||
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
|
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
|
||||||
{
|
{
|
||||||
|
struct kvm_pmu *pmu = &vcpu->arch.pmu;
|
||||||
int i;
|
int i;
|
||||||
u64 type, enable, reg;
|
|
||||||
|
|
||||||
if (val == 0)
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
|
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
|
/* Weed out disabled counters */
|
||||||
|
val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
|
||||||
|
|
||||||
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
|
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
|
||||||
|
u64 type, reg;
|
||||||
|
|
||||||
if (!(val & BIT(i)))
|
if (!(val & BIT(i)))
|
||||||
continue;
|
continue;
|
||||||
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
|
|
||||||
& ARMV8_PMU_EVTYPE_EVENT;
|
/* PMSWINC only applies to ... SW_INC! */
|
||||||
if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
|
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
|
||||||
&& (enable & BIT(i))) {
|
type &= ARMV8_PMU_EVTYPE_EVENT;
|
||||||
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
|
if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/* increment this even SW_INC counter */
|
||||||
|
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
|
||||||
|
reg = lower_32_bits(reg);
|
||||||
|
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
|
||||||
|
|
||||||
|
if (reg) /* no overflow on the low part */
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
|
||||||
|
/* increment the high counter */
|
||||||
|
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
|
||||||
reg = lower_32_bits(reg);
|
reg = lower_32_bits(reg);
|
||||||
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
|
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
|
||||||
if (!reg)
|
if (!reg) /* mark overflow on the high counter */
|
||||||
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
|
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
|
||||||
|
} else {
|
||||||
|
/* mark overflow on low counter */
|
||||||
|
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue