arm64: perf: Move PMU register related defines to asm/perf_event.h

To use the ARMv8 PMU related register defines from the KVM code, we move
the relevant definitions to the asm/perf_event.h header file and rename
them with the prefix ARMV8_PMU_. This allows us to get rid of
kvm_perf_event.h.

Signed-off-by: Anup Patel <anup.patel@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Author:    Shannon Zhao <shannon.zhao@linaro.org>
Date:      2016-03-24 16:01:16 +00:00
Committer: Will Deacon <will.deacon@arm.com>
commit     b8cfadfcef
parent     a6002ec5a8
5 changed files with 66 additions and 123 deletions
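
In practice, the move means any arm64 code that needs the hardware PMU
layout, KVM included, can now pick the defines up from the common perf
header. A minimal sketch (not part of the patch; the helper name is
hypothetical) of a consumer decoding a PMCR_EL0 value with the renamed
defines:

#include <asm/perf_event.h>

/* Hypothetical consumer: extract PMCR_EL0.N, the number of event
 * counters implemented, using the relocated ARMV8_PMU_-prefixed
 * defines introduced by this patch. */
static inline u32 pmu_num_event_counters(u32 pmcr)
{
	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
}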

arch/arm64/include/asm/kvm_host.h

@@ -27,7 +27,6 @@
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
-#include <asm/kvm_perf_event.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED

arch/arm64/kvm/hyp/hyp.h

@@ -21,7 +21,6 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_perf_event.h>
 #include <asm/sysreg.h>
 
 #define __hyp_text __section(.hyp.text) notrace

arch/arm64/include/asm/kvm_perf_event.h (deleted)

@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ASM_KVM_PERF_EVENT_H
-#define __ASM_KVM_PERF_EVENT_H
-
-#define ARMV8_PMU_MAX_COUNTERS	32
-#define ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
-#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
-#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
-#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
-#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug*/
-/* Determines which bit of PMCCNTR_EL0 generates an overflow */
-#define ARMV8_PMU_PMCR_LC	(1 << 6)
-#define ARMV8_PMU_PMCR_N_SHIFT	11	/* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK	0x1f
-#define ARMV8_PMU_PMCR_MASK	0x7f	/* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_PMU_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
-
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_PMU_EXCLUDE_EL1	(1 << 31)
-#define ARMV8_PMU_EXCLUDE_EL0	(1 << 30)
-#define ARMV8_PMU_INCLUDE_EL2	(1 << 27)
-
-/*
- * PMUSERENR: user enable reg
- */
-#define ARMV8_PMU_USERENR_MASK	0xf	/* Mask for writable bits */
-#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
-#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
-#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
-#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
-
-#endif
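
Note, visible by comparing this deleted header with the hunk that
follows: the KVM copy carried the narrower event field
(ARMV8_PMU_EVTYPE_MASK 0xc80003ff, ARMV8_PMU_EVTYPE_EVENT 0x3ff),
while the consolidated header below keeps perf_event.c's 16-bit
variant (0xc800ffff/0xffff), so the merge is not a pure rename.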

arch/arm64/include/asm/perf_event.h

@@ -17,6 +17,53 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#define ARMV8_PMU_MAX_COUNTERS	32
+#define ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMU_PMCR_LC	(1 << 6) /* Overflow on 64 bit cycle counter */
+#define ARMV8_PMU_PMCR_N_SHIFT	11	/* Number of counters supported */
+#define ARMV8_PMU_PMCR_N_MASK	0x1f
+#define ARMV8_PMU_PMCR_MASK	0x7f	/* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
+#define ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
+#define ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_PMU_EXCLUDE_EL1	(1 << 31)
+#define ARMV8_PMU_EXCLUDE_EL0	(1 << 30)
+#define ARMV8_PMU_INCLUDE_EL2	(1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK	0xf	/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
+
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
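
To see how the new names compose, here is an illustrative snippet (not
from the patch; the helper is hypothetical) that builds a
PMXEVTYPER_EL0 value counting an event at EL1 only, then sanitizes it
against the writable-bit mask the same way armv8pmu_write_evtype does
below:

/* Hypothetical helper: program an event type that excludes EL0.
 * ARMV8_PMU_EVTYPE_MASK (0xc800ffff) keeps filter bits 31, 30 and 27
 * plus the 16-bit evtCount field. */
static inline u32 pmu_evtype_el1_only(u16 event)
{
	u32 val = event & ARMV8_PMU_EVTYPE_EVENT;	/* evtCount */

	val |= ARMV8_PMU_EXCLUDE_EL0;			/* don't count user */
	return val & ARMV8_PMU_EVTYPE_MASK;		/* writable bits only */
}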

arch/arm64/kernel/perf_event.c

@@ -20,6 +20,7 @@
  */
 
 #include <asm/irq_regs.h>
+#include <asm/perf_event.h>
 #include <asm/virt.h>
 
 #include <linux/of.h>
@@ -384,9 +385,6 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 #define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
 	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
-#define ARMV8_MAX_COUNTERS	32
-#define ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)
-
 /*
  * ARMv8 low level PMU access
  */
@@ -395,40 +393,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 /*
  * Perf Event to low level counters mapping
  */
 #define	ARMV8_IDX_TO_COUNTER(x)	\
-	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_LC		(1 << 6) /* Overflow on 64 bit cycle counter */
-#define ARMV8_PMCR_N_SHIFT	11	/* Number of counters supported */
-#define ARMV8_PMCR_N_MASK	0x1f
-#define ARMV8_PMCR_MASK		0x7f	/* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
-#define ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_EXCLUDE_EL1	(1 << 31)
-#define ARMV8_EXCLUDE_EL0	(1 << 30)
-#define ARMV8_INCLUDE_EL2	(1 << 27)
+	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
 
 static inline u32 armv8pmu_pmcr_read(void)
 {
@@ -439,14 +404,14 @@ static inline u32 armv8pmu_pmcr_read(void)
 
 static inline void armv8pmu_pmcr_write(u32 val)
 {
-	val &= ARMV8_PMCR_MASK;
+	val &= ARMV8_PMU_PMCR_MASK;
 	isb();
 	asm volatile("msr pmcr_el0, %0" :: "r" (val));
 }
 
 static inline int armv8pmu_has_overflowed(u32 pmovsr)
 {
-	return pmovsr & ARMV8_OVERFLOWED_MASK;
+	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
 }
 
 static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
@@ -512,7 +477,7 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
 	if (armv8pmu_select_counter(idx) == idx) {
-		val &= ARMV8_EVTYPE_MASK;
+		val &= ARMV8_PMU_EVTYPE_MASK;
 		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
 	}
 }
@@ -558,7 +523,7 @@ static inline u32 armv8pmu_getreset_flags(void)
 	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
 
 	/* Write to clear flags */
-	value &= ARMV8_OVSR_MASK;
+	value &= ARMV8_PMU_OVSR_MASK;
 	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
 
 	return value;
@@ -696,7 +661,7 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/* Enable all counters */
-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
@@ -707,7 +672,7 @@ static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/* Disable all counters */
-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
@@ -717,7 +682,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	int idx;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
 	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
@@ -754,11 +719,11 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 	    attr->exclude_kernel != attr->exclude_hv)
 		return -EINVAL;
 	if (attr->exclude_user)
-		config_base |= ARMV8_EXCLUDE_EL0;
+		config_base |= ARMV8_PMU_EXCLUDE_EL0;
 	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_EXCLUDE_EL1;
+		config_base |= ARMV8_PMU_EXCLUDE_EL1;
 	if (!attr->exclude_hv)
-		config_base |= ARMV8_INCLUDE_EL2;
+		config_base |= ARMV8_PMU_INCLUDE_EL2;
 
 	/*
 	 * Install the filter into config_base as this is used to
@@ -784,35 +749,36 @@ static void armv8pmu_reset(void *info)
 	 * Initialize & Reset PMNC. Request overflow interrupt for
 	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
 	 */
-	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
+	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
+			    ARMV8_PMU_PMCR_LC);
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
 				&armv8_pmuv3_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_a53_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_a53_perf_map,
 				&armv8_a53_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_a57_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_a57_perf_map,
 				&armv8_a57_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_thunder_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_thunder_perf_map,
 				&armv8_thunder_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static void armv8pmu_read_num_pmnc_events(void *info)
@@ -820,7 +786,7 @@ static void armv8pmu_read_num_pmnc_events(void *info)
 	int *nb_cnt = info;
 
 	/* Read the nb of CNTx counters supported from PMNC */
-	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 
 	/* Add the CPU cycles counter */
 	*nb_cnt += 1;
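
A worked example of the counter accounting above: a core whose
PMCR_EL0.N field reads 6 implements six event counters, so the driver
reports seven, the extra index being the dedicated cycle counter. The
index-to-counter mapping that ARMV8_PMU_COUNTER_MASK now serves can be
sketched as follows (the ARMV8_IDX_* values are assumed from the
driver's convention of logical index 0 being the cycle counter; the
helper is hypothetical):

/* Assumed convention: logical idx 0 is the cycle counter, idx 1..N
 * map to hardware event counters PMEVCNTR0..PMEVCNTR(N-1). */
#define ARMV8_IDX_CYCLE_COUNTER	0
#define ARMV8_IDX_COUNTER0	1

static inline int idx_to_hw_counter(int idx)
{
	/* ARMV8_PMU_COUNTER_MASK is 31, bounding the 32-counter space */
	return (idx - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK;
}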