drivers/perf/arm_pmu_acpi.c
Latest commit dfd437a257 by Linus Torvalds (2019-07-08): arm64 updates for 5.3

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:

 - arm64 support for syscall emulation via PTRACE_SYSEMU{,_SINGLESTEP}

 - Wire up VM_FLUSH_RESET_PERMS for arm64, allowing the core code to
   manage the permissions of executable vmalloc regions more strictly

 - Slight performance improvement by keeping softirqs enabled while
   touching the FPSIMD/SVE state (kernel_neon_begin/end)

 - Expose a couple of ARMv8.5 features to userspace (HWCAP): CondM (new
   XAFLAG and AXFLAG instructions for floating point comparison flags
   manipulation) and FRINT (rounding floating point numbers to integers)

 - Re-instate ARM64_PSEUDO_NMI support which was previously marked as
   BROKEN due to some bugs (now fixed)

 - Improve parking of stopped CPUs and implement an arm64-specific
   panic_smp_self_stop() to avoid warning on not being able to stop
   secondary CPUs during panic

 - perf: enable the ARM Statistical Profiling Extensions (SPE) on ACPI
   platforms

 - perf: DDR performance monitor support for iMX8QXP

 - cache_line_size() can now be set from DT or ACPI/PPTT if provided, to
   cope with systems whose cache information is not exposed via the CPUID
   registers

 - Avoid warning on hardware cache line size greater than
   ARCH_DMA_MINALIGN if the system is fully coherent

 - arm64 do_page_fault() and hugetlb cleanups

 - Refactor set_pte_at() to avoid redundant READ_ONCE(*ptep)

 - Ignore ACPI 5.1 FADTs reported as 5.0 (infer from the
   'arm_boot_flags' introduced in 5.1)

 - CONFIG_RANDOMIZE_BASE now enabled in defconfig

 - Allow the selection of ARM64_MODULE_PLTS, currently only done via
   RANDOMIZE_BASE (and an erratum workaround), allowing modules to spill
   over into the vmalloc area

 - Make ZONE_DMA32 configurable

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (54 commits)
  perf: arm_spe: Enable ACPI/Platform automatic module loading
  arm_pmu: acpi: spe: Add initial MADT/SPE probing
  ACPI/PPTT: Add function to return ACPI 6.3 Identical tokens
  ACPI/PPTT: Modify node flag detection to find last IDENTICAL
  x86/entry: Simplify _TIF_SYSCALL_EMU handling
  arm64: rename dump_instr as dump_kernel_instr
  arm64/mm: Drop [PTE|PMD]_TYPE_FAULT
  arm64: Implement panic_smp_self_stop()
  arm64: Improve parking of stopped CPUs
  arm64: Expose FRINT capabilities to userspace
  arm64: Expose ARMv8.5 CondM capability to userspace
  arm64: defconfig: enable CONFIG_RANDOMIZE_BASE
  arm64: ARM64_MODULES_PLTS must depend on MODULES
  arm64: bpf: do not allocate executable memory
  arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
  arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
  arm64: module: create module allocations without exec permissions
  arm64: Allow user selection of ARM64_MODULE_PLTS
  acpi/arm64: ignore 5.1 FADTs that are reported as 5.0
  arm64: Allow selecting Pseudo-NMI again
  ...

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
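
/*
 * Register the GSI named by a CPU's MADT GICC "performance interrupt"
 * field. Returns the Linux IRQ number, 0 when the GICC describes no
 * usable interrupt, or a negative error code.
 */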
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
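
/* Undo arm_pmu_acpi_register_irq(): release the GSI named by the MADT GICC. */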
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
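
/*
 * Resource table and platform device for the SPE PMU driver. The IRQ
 * resource is filled in by arm_spe_acpi_register_device() once the SPE
 * GSI has been registered.
 */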
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags = IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, we only support homogeneous ACPI/SPE machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */
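
/*
 * Register the PMU interrupt for each possible CPU and stash the resulting
 * IRQ in the pmu_irqs per-cpu variable. On failure, unwind all prior
 * registrations, taking care to release each GSI only once even when it is
 * shared between CPUs (e.g. for PPIs).
 */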
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
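
/*
 * Return the arm_pmu already probed for a CPU with the same MIDR as the
 * calling CPU, or atomically allocate a fresh one if this is the first CPU
 * of its microarchitecture to come up.
 */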
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}
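
/*
 * Initialise and register every PMU probed so far that does not yet have a
 * name, using the driver-supplied init_fn. Each PMU is given a unique
 * "<base name>_<index>" name before registration.
 */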
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}
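
/*
 * Entry point: register the SPE device if the MADT describes one, parse the
 * per-CPU PMU interrupts, then install the CPU-starting hotplug callback.
 * Does nothing when booting without ACPI.
 */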
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}

subsys_initcall(arm_pmu_acpi_init)