| From aa76829171e98bd75a0cc00b6248eca269ac7f4f Mon Sep 17 00:00:00 2001 |
| From: Eric Auger <eric.auger@redhat.com> |
| Date: Fri, 24 Jan 2020 15:25:34 +0100 |
| Subject: KVM: arm64: pmu: Fix chained SW_INCR counters |
| |
| From: Eric Auger <eric.auger@redhat.com> |
| |
| commit aa76829171e98bd75a0cc00b6248eca269ac7f4f upstream. |
| |
| At the moment a SW_INCR counter always overflows on a 32-bit |
| boundary, independently of whether the n+1th counter is |
| programmed as CHAIN. |
| |
| Check whether the SW_INCR counter is a 64b counter and if so, |
| implement the 64b logic. |
| |
| Fixes: 80f393a23be6 ("KVM: arm/arm64: Support chained PMU counters") |
| Signed-off-by: Eric Auger <eric.auger@redhat.com> |
| Signed-off-by: Marc Zyngier <maz@kernel.org> |
| Link: https://lore.kernel.org/r/20200124142535.29386-4-eric.auger@redhat.com |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| virt/kvm/arm/pmu.c | 43 ++++++++++++++++++++++++++++++------------- |
| 1 file changed, 30 insertions(+), 13 deletions(-) |
| |
| --- a/virt/kvm/arm/pmu.c |
| +++ b/virt/kvm/arm/pmu.c |
| @@ -480,28 +480,45 @@ static void kvm_pmu_perf_overflow(struct |
| */ |
| void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) |
| { |
| + struct kvm_pmu *pmu = &vcpu->arch.pmu; |
| int i; |
| - u64 type, enable, reg; |
| - |
| - if (val == 0) |
| - return; |
| |
| if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) |
| return; |
| |
| - enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); |
| + /* Weed out disabled counters */ |
| + val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); |
| + |
| for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) { |
| + u64 type, reg; |
| + |
| if (!(val & BIT(i))) |
| continue; |
| - type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) |
| - & ARMV8_PMU_EVTYPE_EVENT; |
| - if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR) |
| - && (enable & BIT(i))) { |
| - reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; |
| + |
| + /* PMSWINC only applies to ... SW_INC! */ |
| + type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i); |
| + type &= ARMV8_PMU_EVTYPE_EVENT; |
| + if (type != ARMV8_PMUV3_PERFCTR_SW_INCR) |
| + continue; |
| + |
| + /* increment this even SW_INC counter */ |
| + reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; |
| + reg = lower_32_bits(reg); |
| + __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; |
| + |
| + if (reg) /* no overflow on the low part */ |
| + continue; |
| + |
| + if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { |
| + /* increment the high counter */ |
| + reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1; |
| reg = lower_32_bits(reg); |
| - __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg; |
| - if (!reg) |
| - __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); |
| + __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg; |
| + if (!reg) /* mark overflow on the high counter */ |
| + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1); |
| + } else { |
| + /* mark overflow on low counter */ |
| + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i); |
| } |
| } |
| } |