mirror of https://github.com/torvalds/linux.git
Merge branch 'will/for-next/perf' into for-next/core

* will/for-next/perf:
  arm64: pmuv3: use arm_pmu ACPI framework
  arm64: pmuv3: handle !PMUv3 when probing
  drivers/perf: arm_pmu: add ACPI framework
  arm64: add function to get a cpu's MADT GICC table
  drivers/perf: arm_pmu: split out platform device probe logic
  drivers/perf: arm_pmu: move irq request/free into probe
  drivers/perf: arm_pmu: split cpu-local irq request/free
  drivers/perf: arm_pmu: rename irq request/free functions
  drivers/perf: arm_pmu: handle no platform_device
  drivers/perf: arm_pmu: simplify cpu_pmu_request_irqs()
  drivers/perf: arm_pmu: factor out pmu registration
  drivers/perf: arm_pmu: fold init into alloc
  drivers/perf: arm_pmu: define armpmu_init_fn
  drivers/perf: arm_pmu: remove pointless PMU disabling
  perf: qcom: Add L3 cache PMU driver
  drivers/perf: arm_pmu: split irq request from enable
  drivers/perf: arm_pmu: manage interrupts per-cpu
  drivers/perf: arm_pmu: rework per-cpu allocation
  MAINTAINERS: Add file patterns for perf device tree bindings

commit 494bc3cd3d
Documentation/perf/qcom_l3_pmu.txt (new file, 25 lines)
@@ -0,0 +1,25 @@
Qualcomm Datacenter Technologies L3 Cache Performance Monitoring Unit (PMU)
===========================================================================

This driver supports the L3 cache PMUs found in Qualcomm Datacenter Technologies
Centriq SoCs. The L3 cache on these SOCs is composed of multiple slices, shared
by all cores within a socket. Each slice is exposed as a separate uncore perf
PMU with device name l3cache_<socket>_<instance>. User space is responsible
for aggregating across slices.

The driver provides a description of its available events and configuration
options in sysfs, see /sys/devices/l3cache*. Given that these are uncore PMUs
the driver also exposes a "cpumask" sysfs attribute which contains a mask
consisting of one CPU per socket which will be used to handle all the PMU
events on that socket.

The hardware implements 32bit event counters and has a flat 8bit event space
exposed via the "event" format attribute. In addition to the 32bit physical
counters the driver supports virtual 64bit hardware counters by using hardware
counter chaining. This feature is exposed via the "lc" (long counter) format
flag. E.g.:

    perf stat -e l3cache_0_0/read-miss,lc/

Given that these are uncore PMUs the driver does not support sampling, therefore
"perf record" will not work. Per-task perf sessions are not supported.
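As an aside (not part of this commit), the config layout described above, with the event type in config bits 0-7 and the "lc" long-counter flag at bit 32, maps onto a raw perf_event_open() call roughly as in the following user-space sketch. The PMU type value (24) is a placeholder that must be read from /sys/devices/l3cache_0_0/type at run time, and 0x21 is the read-miss event code defined by the driver added later in this merge; sufficient perf privileges are assumed.

/*
 * Illustrative user-space sketch: open one l3cache slice event using the
 * documented config encoding. Not part of this commit.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 24;				/* placeholder: read from /sys/devices/l3cache_0_0/type */
	attr.config = 0x21 | (1ULL << 32);	/* read-miss event, "lc" 64-bit chained counter */

	/*
	 * Uncore PMU: count system-wide (pid = -1) on one CPU; CPU 0 is used
	 * here for brevity, in practice pick a CPU from the "cpumask" file.
	 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("l3cache_0_0 read-miss: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}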
MAINTAINERS
@@ -976,6 +976,7 @@ F:	arch/arm*/include/asm/perf_event.h
 F:	drivers/perf/*
 F:	include/linux/perf/arm_pmu.h
 F:	Documentation/devicetree/bindings/arm/pmu.txt
+F:	Documentation/devicetree/bindings/perf/
 
 ARM PORT
 M:	Russell King <linux@armlinux.org.uk>
arch/arm64/include/asm/acpi.h
@@ -85,6 +85,8 @@ static inline bool acpi_has_cpu_in_madt(void)
 	return true;
 }
 
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+
 static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
 
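For orientation (not part of this commit), a consumer of the new helper is expected to look up a CPU's stashed MADT GICC entry and read fields such as the performance interrupt GSI, mirroring the arm_pmu_acpi.c code added later in this merge. The function name below is hypothetical and the snippet is only a sketch.

/* Illustrative sketch: fetch the PMU overflow GSI for a CPU from its MADT GICC entry. */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/printk.h>

static int example_cpu_pmu_gsi(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc = acpi_cpu_get_madt_gicc(cpu);

	if (WARN_ON(!gicc))
		return -EINVAL;

	/* The trigger type for the performance interrupt comes from the GICC flags. */
	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		pr_debug("CPU%d PMU GSI %u is edge-triggered\n",
			 cpu, gicc->performance_interrupt);

	return gicc->performance_interrupt;
}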
@ -957,11 +957,26 @@ static int armv8_vulcan_map_event(struct perf_event *event)
|
||||
ARMV8_PMU_EVTYPE_EVENT);
|
||||
}
|
||||
|
||||
struct armv8pmu_probe_info {
|
||||
struct arm_pmu *pmu;
|
||||
bool present;
|
||||
};
|
||||
|
||||
static void __armv8pmu_probe_pmu(void *info)
|
||||
{
|
||||
struct arm_pmu *cpu_pmu = info;
|
||||
struct armv8pmu_probe_info *probe = info;
|
||||
struct arm_pmu *cpu_pmu = probe->pmu;
|
||||
u64 dfr0, pmuver;
|
||||
u32 pmceid[2];
|
||||
|
||||
dfr0 = read_sysreg(id_aa64dfr0_el1);
|
||||
pmuver = cpuid_feature_extract_unsigned_field(dfr0,
|
||||
ID_AA64DFR0_PMUVER_SHIFT);
|
||||
if (pmuver != 1)
|
||||
return;
|
||||
|
||||
probe->present = true;
|
||||
|
||||
/* Read the nb of CNTx counters supported from PMNC */
|
||||
cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
|
||||
& ARMV8_PMU_PMCR_N_MASK;
|
||||
@ -979,13 +994,27 @@ static void __armv8pmu_probe_pmu(void *info)
|
||||
|
||||
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
return smp_call_function_any(&cpu_pmu->supported_cpus,
|
||||
struct armv8pmu_probe_info probe = {
|
||||
.pmu = cpu_pmu,
|
||||
.present = false,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = smp_call_function_any(&cpu_pmu->supported_cpus,
|
||||
__armv8pmu_probe_pmu,
|
||||
cpu_pmu, 1);
|
||||
&probe, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return probe.present ? 0 : -ENODEV;
|
||||
}
|
||||
|
||||
static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
int ret = armv8pmu_probe_pmu(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->handle_irq = armv8pmu_handle_irq,
|
||||
cpu_pmu->enable = armv8pmu_enable_event,
|
||||
cpu_pmu->disable = armv8pmu_disable_event,
|
||||
@ -997,78 +1026,104 @@ static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
cpu_pmu->reset = armv8pmu_reset,
|
||||
cpu_pmu->max_period = (1LLU << 32) - 1,
|
||||
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv8_pmu_init(cpu_pmu);
|
||||
int ret = armv8_pmu_init(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->name = "armv8_pmuv3";
|
||||
cpu_pmu->map_event = armv8_pmuv3_map_event;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
|
||||
&armv8_pmuv3_events_attr_group;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
|
||||
&armv8_pmuv3_format_attr_group;
|
||||
return armv8pmu_probe_pmu(cpu_pmu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv8_pmu_init(cpu_pmu);
|
||||
int ret = armv8_pmu_init(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->name = "armv8_cortex_a53";
|
||||
cpu_pmu->map_event = armv8_a53_map_event;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
|
||||
&armv8_pmuv3_events_attr_group;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
|
||||
&armv8_pmuv3_format_attr_group;
|
||||
return armv8pmu_probe_pmu(cpu_pmu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv8_pmu_init(cpu_pmu);
|
||||
int ret = armv8_pmu_init(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->name = "armv8_cortex_a57";
|
||||
cpu_pmu->map_event = armv8_a57_map_event;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
|
||||
&armv8_pmuv3_events_attr_group;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
|
||||
&armv8_pmuv3_format_attr_group;
|
||||
return armv8pmu_probe_pmu(cpu_pmu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv8_pmu_init(cpu_pmu);
|
||||
int ret = armv8_pmu_init(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->name = "armv8_cortex_a72";
|
||||
cpu_pmu->map_event = armv8_a57_map_event;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
|
||||
&armv8_pmuv3_events_attr_group;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
|
||||
&armv8_pmuv3_format_attr_group;
|
||||
return armv8pmu_probe_pmu(cpu_pmu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv8_pmu_init(cpu_pmu);
|
||||
int ret = armv8_pmu_init(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->name = "armv8_cavium_thunder";
|
||||
cpu_pmu->map_event = armv8_thunder_map_event;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
|
||||
&armv8_pmuv3_events_attr_group;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
|
||||
&armv8_pmuv3_format_attr_group;
|
||||
return armv8pmu_probe_pmu(cpu_pmu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv8_pmu_init(cpu_pmu);
|
||||
int ret = armv8_pmu_init(cpu_pmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->name = "armv8_brcm_vulcan";
|
||||
cpu_pmu->map_event = armv8_vulcan_map_event;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
|
||||
&armv8_pmuv3_events_attr_group;
|
||||
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
|
||||
&armv8_pmuv3_format_attr_group;
|
||||
return armv8pmu_probe_pmu(cpu_pmu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id armv8_pmu_of_device_ids[] = {
|
||||
@ -1081,24 +1136,9 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
|
||||
{},
|
||||
};
|
||||
|
||||
/*
|
||||
* Non DT systems have their micro/arch events probed at run-time.
|
||||
* A fairly complete list of generic events are provided and ones that
|
||||
* aren't supported by the current PMU are disabled.
|
||||
*/
|
||||
static const struct pmu_probe_info armv8_pmu_probe_table[] = {
|
||||
PMU_PROBE(0, 0, armv8_pmuv3_init), /* enable all defined counters */
|
||||
{ /* sentinel value */ }
|
||||
};
|
||||
|
||||
static int armv8_pmu_device_probe(struct platform_device *pdev)
|
||||
{
|
||||
if (acpi_disabled)
|
||||
return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
|
||||
NULL);
|
||||
|
||||
return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
|
||||
armv8_pmu_probe_table);
|
||||
return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
|
||||
}
|
||||
|
||||
static struct platform_driver armv8_pmu_driver = {
|
||||
@ -1109,4 +1149,11 @@ static struct platform_driver armv8_pmu_driver = {
|
||||
.probe = armv8_pmu_device_probe,
|
||||
};
|
||||
|
||||
builtin_platform_driver(armv8_pmu_driver);
|
||||
static int __init armv8_pmu_driver_init(void)
|
||||
{
|
||||
if (acpi_disabled)
|
||||
return platform_driver_register(&armv8_pmu_driver);
|
||||
else
|
||||
return arm_pmu_acpi_probe(armv8_pmuv3_init);
|
||||
}
|
||||
device_initcall(armv8_pmu_driver_init)
|
||||
|
arch/arm64/kernel/smp.c
@@ -521,6 +521,13 @@ static bool bootcpu_valid __initdata;
 static unsigned int cpu_count = 1;
 
 #ifdef CONFIG_ACPI
+static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
+
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
+{
+	return &cpu_madt_gicc[cpu];
+}
+
 /*
  * acpi_map_gic_cpu_interface - parse processor MADT entry
  *
@@ -555,6 +562,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 			return;
 		}
 		bootcpu_valid = true;
+		cpu_madt_gicc[0] = *processor;
 		early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
 		return;
 	}
@@ -565,6 +573,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 	/* map the logical cpu id to cpu MPIDR */
 	cpu_logical_map(cpu_count) = hwid;
 
+	cpu_madt_gicc[cpu_count] = *processor;
+
 	/*
 	 * Set-up the ACPI parking protocol cpu entries
 	 * while initializing the cpu_logical_map to
drivers/perf/Kconfig
@@ -12,6 +12,10 @@ config ARM_PMU
 	  Say y if you want to use CPU performance monitors on ARM-based
 	  systems.
 
+config ARM_PMU_ACPI
+	depends on ARM_PMU && ACPI
+	def_bool y
+
 config QCOM_L2_PMU
 	bool "Qualcomm Technologies L2-cache PMU"
 	depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
@@ -21,6 +25,16 @@ config QCOM_L2_PMU
 	  Adds the L2 cache PMU into the perf events subsystem for
 	  monitoring L2 cache events.
 
+config QCOM_L3_PMU
+	bool "Qualcomm Technologies L3-cache PMU"
+	depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
+	select QCOM_IRQ_COMBINER
+	help
+	  Provides support for the L3 cache performance monitor unit (PMU)
+	  in Qualcomm Technologies processors.
+	  Adds the L3 cache PMU into the perf events subsystem for
+	  monitoring L3 cache events.
+
 config XGENE_PMU
 	depends on PERF_EVENTS && ARCH_XGENE
 	bool "APM X-Gene SoC PMU"
drivers/perf/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
+obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
 obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
+obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
@ -16,7 +16,6 @@
|
||||
#include <linux/cpu_pm.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/perf/arm_pmu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
@ -25,7 +24,6 @@
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdesc.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/irq_regs.h>
|
||||
|
||||
static int
|
||||
@ -235,20 +233,15 @@ armpmu_add(struct perf_event *event, int flags)
|
||||
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int idx;
|
||||
int err = 0;
|
||||
|
||||
/* An event following a process won't be stopped earlier */
|
||||
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
|
||||
return -ENOENT;
|
||||
|
||||
perf_pmu_disable(event->pmu);
|
||||
|
||||
/* If we don't have a space for the counter then finish early. */
|
||||
idx = armpmu->get_event_idx(hw_events, event);
|
||||
if (idx < 0) {
|
||||
err = idx;
|
||||
goto out;
|
||||
}
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
|
||||
/*
|
||||
* If there is an event in the counter we are going to use then make
|
||||
@ -265,9 +258,7 @@ armpmu_add(struct perf_event *event, int flags)
|
||||
/* Propagate our changes to the userspace mapping. */
|
||||
perf_event_update_userpage(event);
|
||||
|
||||
out:
|
||||
perf_pmu_enable(event->pmu);
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -323,10 +314,16 @@ validate_group(struct perf_event *event)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
|
||||
{
|
||||
struct platform_device *pdev = armpmu->plat_device;
|
||||
|
||||
return pdev ? dev_get_platdata(&pdev->dev) : NULL;
|
||||
}
|
||||
|
||||
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
|
||||
{
|
||||
struct arm_pmu *armpmu;
|
||||
struct platform_device *plat_device;
|
||||
struct arm_pmu_platdata *plat;
|
||||
int ret;
|
||||
u64 start_clock, finish_clock;
|
||||
@ -338,8 +335,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
|
||||
* dereference.
|
||||
*/
|
||||
armpmu = *(void **)dev;
|
||||
plat_device = armpmu->plat_device;
|
||||
plat = dev_get_platdata(&plat_device->dev);
|
||||
|
||||
plat = armpmu_get_platdata(armpmu);
|
||||
|
||||
start_clock = sched_clock();
|
||||
if (plat && plat->handle_irq)
|
||||
@ -352,37 +349,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
armpmu_release_hardware(struct arm_pmu *armpmu)
|
||||
{
|
||||
armpmu->free_irq(armpmu);
|
||||
}
|
||||
|
||||
static int
|
||||
armpmu_reserve_hardware(struct arm_pmu *armpmu)
|
||||
{
|
||||
int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
|
||||
if (err) {
|
||||
armpmu_release_hardware(armpmu);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
hw_perf_event_destroy(struct perf_event *event)
|
||||
{
|
||||
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||
atomic_t *active_events = &armpmu->active_events;
|
||||
struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
|
||||
|
||||
if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
|
||||
armpmu_release_hardware(armpmu);
|
||||
mutex_unlock(pmu_reserve_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
event_requires_mode_exclusion(struct perf_event_attr *attr)
|
||||
{
|
||||
@ -455,8 +421,6 @@ __hw_perf_event_init(struct perf_event *event)
|
||||
static int armpmu_event_init(struct perf_event *event)
|
||||
{
|
||||
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||
int err = 0;
|
||||
atomic_t *active_events = &armpmu->active_events;
|
||||
|
||||
/*
|
||||
* Reject CPU-affine events for CPUs that are of a different class to
|
||||
@ -476,26 +440,7 @@ static int armpmu_event_init(struct perf_event *event)
|
||||
if (armpmu->map_event(event) == -ENOENT)
|
||||
return -ENOENT;
|
||||
|
||||
event->destroy = hw_perf_event_destroy;
|
||||
|
||||
if (!atomic_inc_not_zero(active_events)) {
|
||||
mutex_lock(&armpmu->reserve_mutex);
|
||||
if (atomic_read(active_events) == 0)
|
||||
err = armpmu_reserve_hardware(armpmu);
|
||||
|
||||
if (!err)
|
||||
atomic_inc(active_events);
|
||||
mutex_unlock(&armpmu->reserve_mutex);
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = __hw_perf_event_init(event);
|
||||
if (err)
|
||||
hw_perf_event_destroy(event);
|
||||
|
||||
return err;
|
||||
return __hw_perf_event_init(event);
|
||||
}
|
||||
|
||||
static void armpmu_enable(struct pmu *pmu)
|
||||
@ -553,27 +498,6 @@ static struct attribute_group armpmu_common_attr_group = {
|
||||
.attrs = armpmu_common_attrs,
|
||||
};
|
||||
|
||||
static void armpmu_init(struct arm_pmu *armpmu)
|
||||
{
|
||||
atomic_set(&armpmu->active_events, 0);
|
||||
mutex_init(&armpmu->reserve_mutex);
|
||||
|
||||
armpmu->pmu = (struct pmu) {
|
||||
.pmu_enable = armpmu_enable,
|
||||
.pmu_disable = armpmu_disable,
|
||||
.event_init = armpmu_event_init,
|
||||
.add = armpmu_add,
|
||||
.del = armpmu_del,
|
||||
.start = armpmu_start,
|
||||
.stop = armpmu_stop,
|
||||
.read = armpmu_read,
|
||||
.filter_match = armpmu_filter_match,
|
||||
.attr_groups = armpmu->attr_groups,
|
||||
};
|
||||
armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
|
||||
&armpmu_common_attr_group;
|
||||
}
|
||||
|
||||
/* Set at runtime when we know what CPU type we are. */
|
||||
static struct arm_pmu *__oprofile_cpu_pmu;
|
||||
|
||||
@ -601,115 +525,87 @@ int perf_num_counters(void)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(perf_num_counters);
|
||||
|
||||
static void cpu_pmu_enable_percpu_irq(void *data)
|
||||
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
|
||||
{
|
||||
int irq = *(int *)data;
|
||||
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
|
||||
int irq = per_cpu(hw_events->irq, cpu);
|
||||
|
||||
enable_percpu_irq(irq, IRQ_TYPE_NONE);
|
||||
}
|
||||
if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
|
||||
return;
|
||||
|
||||
static void cpu_pmu_disable_percpu_irq(void *data)
|
||||
{
|
||||
int irq = *(int *)data;
|
||||
|
||||
disable_percpu_irq(irq);
|
||||
}
|
||||
|
||||
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
int i, irq, irqs;
|
||||
struct platform_device *pmu_device = cpu_pmu->plat_device;
|
||||
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
|
||||
|
||||
irqs = min(pmu_device->num_resources, num_possible_cpus());
|
||||
|
||||
irq = platform_get_irq(pmu_device, 0);
|
||||
if (irq > 0 && irq_is_percpu(irq)) {
|
||||
on_each_cpu_mask(&cpu_pmu->supported_cpus,
|
||||
cpu_pmu_disable_percpu_irq, &irq, 1);
|
||||
if (irq_is_percpu(irq)) {
|
||||
free_percpu_irq(irq, &hw_events->percpu_pmu);
|
||||
} else {
|
||||
for (i = 0; i < irqs; ++i) {
|
||||
int cpu = i;
|
||||
|
||||
if (cpu_pmu->irq_affinity)
|
||||
cpu = cpu_pmu->irq_affinity[i];
|
||||
|
||||
if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
|
||||
continue;
|
||||
irq = platform_get_irq(pmu_device, i);
|
||||
if (irq > 0)
|
||||
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
|
||||
}
|
||||
cpumask_clear(&armpmu->active_irqs);
|
||||
return;
|
||||
}
|
||||
|
||||
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
|
||||
}
|
||||
|
||||
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
|
||||
void armpmu_free_irqs(struct arm_pmu *armpmu)
|
||||
{
|
||||
int i, err, irq, irqs;
|
||||
struct platform_device *pmu_device = cpu_pmu->plat_device;
|
||||
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
|
||||
int cpu;
|
||||
|
||||
if (!pmu_device)
|
||||
return -ENODEV;
|
||||
for_each_cpu(cpu, &armpmu->supported_cpus)
|
||||
armpmu_free_irq(armpmu, cpu);
|
||||
}
|
||||
|
||||
irqs = min(pmu_device->num_resources, num_possible_cpus());
|
||||
if (irqs < 1) {
|
||||
pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
|
||||
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
|
||||
{
|
||||
int err = 0;
|
||||
struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
|
||||
const irq_handler_t handler = armpmu_dispatch_irq;
|
||||
int irq = per_cpu(hw_events->irq, cpu);
|
||||
if (!irq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pmu_device, 0);
|
||||
if (irq > 0 && irq_is_percpu(irq)) {
|
||||
if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
|
||||
err = request_percpu_irq(irq, handler, "arm-pmu",
|
||||
&hw_events->percpu_pmu);
|
||||
if (err) {
|
||||
pr_err("unable to request IRQ%d for ARM PMU counters\n",
|
||||
irq);
|
||||
return err;
|
||||
}
|
||||
} else if (irq_is_percpu(irq)) {
|
||||
int other_cpu = cpumask_first(&armpmu->active_irqs);
|
||||
int other_irq = per_cpu(hw_events->irq, other_cpu);
|
||||
|
||||
on_each_cpu_mask(&cpu_pmu->supported_cpus,
|
||||
cpu_pmu_enable_percpu_irq, &irq, 1);
|
||||
if (irq != other_irq) {
|
||||
pr_warn("mismatched PPIs detected.\n");
|
||||
err = -EINVAL;
|
||||
}
|
||||
} else {
|
||||
for (i = 0; i < irqs; ++i) {
|
||||
int cpu = i;
|
||||
|
||||
err = 0;
|
||||
irq = platform_get_irq(pmu_device, i);
|
||||
if (irq < 0)
|
||||
continue;
|
||||
|
||||
if (cpu_pmu->irq_affinity)
|
||||
cpu = cpu_pmu->irq_affinity[i];
|
||||
|
||||
/*
|
||||
* If we have a single PMU interrupt that we can't shift,
|
||||
* assume that we're running on a uniprocessor machine and
|
||||
* continue. Otherwise, continue without this interrupt.
|
||||
*/
|
||||
if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
|
||||
pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
|
||||
irq, cpu);
|
||||
continue;
|
||||
}
|
||||
|
||||
err = request_irq(irq, handler,
|
||||
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
|
||||
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
|
||||
if (err) {
|
||||
pr_err("unable to request IRQ%d for ARM PMU counters\n",
|
||||
irq);
|
||||
return err;
|
||||
}
|
||||
|
||||
cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
|
||||
}
|
||||
err = request_irq(irq, handler,
|
||||
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
|
||||
per_cpu_ptr(&hw_events->percpu_pmu, cpu));
|
||||
}
|
||||
|
||||
if (err) {
|
||||
pr_err("unable to request IRQ%d for ARM PMU counters\n",
|
||||
irq);
|
||||
return err;
|
||||
}
|
||||
|
||||
cpumask_set_cpu(cpu, &armpmu->active_irqs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int armpmu_request_irqs(struct arm_pmu *armpmu)
|
||||
{
|
||||
int cpu, err;
|
||||
|
||||
for_each_cpu(cpu, &armpmu->supported_cpus) {
|
||||
err = armpmu_request_irq(armpmu, cpu);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
|
||||
{
|
||||
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
|
||||
return per_cpu(hw_events->irq, cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* PMU hardware loses all context when a CPU goes offline.
|
||||
* When a CPU is hotplugged back in, since some hardware registers are
|
||||
@ -719,11 +615,42 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
|
||||
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
|
||||
int irq;
|
||||
|
||||
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
|
||||
return 0;
|
||||
if (pmu->reset)
|
||||
pmu->reset(pmu);
|
||||
|
||||
irq = armpmu_get_cpu_irq(pmu, cpu);
|
||||
if (irq) {
|
||||
if (irq_is_percpu(irq)) {
|
||||
enable_percpu_irq(irq, IRQ_TYPE_NONE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (irq_force_affinity(irq, cpumask_of(cpu)) &&
|
||||
num_possible_cpus() > 1) {
|
||||
pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
|
||||
irq, cpu);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
|
||||
int irq;
|
||||
|
||||
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
|
||||
return 0;
|
||||
|
||||
irq = armpmu_get_cpu_irq(pmu, cpu);
|
||||
if (irq && irq_is_percpu(irq))
|
||||
disable_percpu_irq(irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -828,56 +755,22 @@ static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
|
||||
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
int err;
|
||||
int cpu;
|
||||
struct pmu_hw_events __percpu *cpu_hw_events;
|
||||
|
||||
cpu_hw_events = alloc_percpu(struct pmu_hw_events);
|
||||
if (!cpu_hw_events)
|
||||
return -ENOMEM;
|
||||
|
||||
err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
|
||||
&cpu_pmu->node);
|
||||
err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
|
||||
&cpu_pmu->node);
|
||||
if (err)
|
||||
goto out_free;
|
||||
goto out;
|
||||
|
||||
err = cpu_pm_pmu_register(cpu_pmu);
|
||||
if (err)
|
||||
goto out_unregister;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
|
||||
raw_spin_lock_init(&events->pmu_lock);
|
||||
events->percpu_pmu = cpu_pmu;
|
||||
}
|
||||
|
||||
cpu_pmu->hw_events = cpu_hw_events;
|
||||
cpu_pmu->request_irq = cpu_pmu_request_irq;
|
||||
cpu_pmu->free_irq = cpu_pmu_free_irq;
|
||||
|
||||
/* Ensure the PMU has sane values out of reset. */
|
||||
if (cpu_pmu->reset)
|
||||
on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
|
||||
cpu_pmu, 1);
|
||||
|
||||
/* If no interrupts available, set the corresponding capability flag */
|
||||
if (!platform_get_irq(cpu_pmu->plat_device, 0))
|
||||
cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
|
||||
|
||||
/*
|
||||
* This is a CPU PMU potentially in a heterogeneous configuration (e.g.
|
||||
* big.LITTLE). This is not an uncore PMU, and we have taken ctx
|
||||
* sharing into account (e.g. with our pmu::filter_match callback and
|
||||
* pmu::event_init group validation).
|
||||
*/
|
||||
cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
|
||||
|
||||
return 0;
|
||||
|
||||
out_unregister:
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
|
||||
&cpu_pmu->node);
|
||||
out_free:
|
||||
free_percpu(cpu_hw_events);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -886,177 +779,78 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
|
||||
cpu_pm_pmu_unregister(cpu_pmu);
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
|
||||
&cpu_pmu->node);
|
||||
free_percpu(cpu_pmu->hw_events);
|
||||
}
|
||||
|
||||
/*
|
||||
* CPU PMU identification and probing.
|
||||
*/
|
||||
static int probe_current_pmu(struct arm_pmu *pmu,
|
||||
const struct pmu_probe_info *info)
|
||||
struct arm_pmu *armpmu_alloc(void)
|
||||
{
|
||||
int cpu = get_cpu();
|
||||
unsigned int cpuid = read_cpuid_id();
|
||||
int ret = -ENODEV;
|
||||
|
||||
pr_info("probing PMU on CPU %d\n", cpu);
|
||||
|
||||
for (; info->init != NULL; info++) {
|
||||
if ((cpuid & info->mask) != info->cpuid)
|
||||
continue;
|
||||
ret = info->init(pmu);
|
||||
break;
|
||||
}
|
||||
|
||||
put_cpu();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int of_pmu_irq_cfg(struct arm_pmu *pmu)
|
||||
{
|
||||
int *irqs, i = 0;
|
||||
bool using_spi = false;
|
||||
struct platform_device *pdev = pmu->plat_device;
|
||||
|
||||
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
|
||||
if (!irqs)
|
||||
return -ENOMEM;
|
||||
|
||||
do {
|
||||
struct device_node *dn;
|
||||
int cpu, irq;
|
||||
|
||||
/* See if we have an affinity entry */
|
||||
dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
|
||||
if (!dn)
|
||||
break;
|
||||
|
||||
/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
|
||||
irq = platform_get_irq(pdev, i);
|
||||
if (irq > 0) {
|
||||
bool spi = !irq_is_percpu(irq);
|
||||
|
||||
if (i > 0 && spi != using_spi) {
|
||||
pr_err("PPI/SPI IRQ type mismatch for %s!\n",
|
||||
dn->name);
|
||||
of_node_put(dn);
|
||||
kfree(irqs);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
using_spi = spi;
|
||||
}
|
||||
|
||||
/* Now look up the logical CPU number */
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct device_node *cpu_dn;
|
||||
|
||||
cpu_dn = of_cpu_device_node_get(cpu);
|
||||
of_node_put(cpu_dn);
|
||||
|
||||
if (dn == cpu_dn)
|
||||
break;
|
||||
}
|
||||
|
||||
if (cpu >= nr_cpu_ids) {
|
||||
pr_warn("Failed to find logical CPU for %s\n",
|
||||
dn->name);
|
||||
of_node_put(dn);
|
||||
cpumask_setall(&pmu->supported_cpus);
|
||||
break;
|
||||
}
|
||||
of_node_put(dn);
|
||||
|
||||
/* For SPIs, we need to track the affinity per IRQ */
|
||||
if (using_spi) {
|
||||
if (i >= pdev->num_resources)
|
||||
break;
|
||||
|
||||
irqs[i] = cpu;
|
||||
}
|
||||
|
||||
/* Keep track of the CPUs containing this PMU type */
|
||||
cpumask_set_cpu(cpu, &pmu->supported_cpus);
|
||||
i++;
|
||||
} while (1);
|
||||
|
||||
/* If we didn't manage to parse anything, try the interrupt affinity */
|
||||
if (cpumask_weight(&pmu->supported_cpus) == 0) {
|
||||
int irq = platform_get_irq(pdev, 0);
|
||||
|
||||
if (irq > 0 && irq_is_percpu(irq)) {
|
||||
/* If using PPIs, check the affinity of the partition */
|
||||
int ret;
|
||||
|
||||
ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
|
||||
if (ret) {
|
||||
kfree(irqs);
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
/* Otherwise default to all CPUs */
|
||||
cpumask_setall(&pmu->supported_cpus);
|
||||
}
|
||||
}
|
||||
|
||||
/* If we matched up the IRQ affinities, use them to route the SPIs */
|
||||
if (using_spi && i == pdev->num_resources)
|
||||
pmu->irq_affinity = irqs;
|
||||
else
|
||||
kfree(irqs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arm_pmu_device_probe(struct platform_device *pdev,
|
||||
const struct of_device_id *of_table,
|
||||
const struct pmu_probe_info *probe_table)
|
||||
{
|
||||
const struct of_device_id *of_id;
|
||||
const int (*init_fn)(struct arm_pmu *);
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct arm_pmu *pmu;
|
||||
int ret = -ENODEV;
|
||||
int cpu;
|
||||
|
||||
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
|
||||
pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
|
||||
if (!pmu) {
|
||||
pr_info("failed to allocate PMU device!\n");
|
||||
return -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
armpmu_init(pmu);
|
||||
|
||||
pmu->plat_device = pdev;
|
||||
|
||||
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
|
||||
init_fn = of_id->data;
|
||||
|
||||
pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
|
||||
"secure-reg-access");
|
||||
|
||||
/* arm64 systems boot only as non-secure */
|
||||
if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
|
||||
pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
|
||||
pmu->secure_access = false;
|
||||
}
|
||||
|
||||
ret = of_pmu_irq_cfg(pmu);
|
||||
if (!ret)
|
||||
ret = init_fn(pmu);
|
||||
} else if (probe_table) {
|
||||
cpumask_setall(&pmu->supported_cpus);
|
||||
ret = probe_current_pmu(pmu, probe_table);
|
||||
pmu->hw_events = alloc_percpu(struct pmu_hw_events);
|
||||
if (!pmu->hw_events) {
|
||||
pr_info("failed to allocate per-cpu PMU data.\n");
|
||||
goto out_free_pmu;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
|
||||
goto out_free;
|
||||
pmu->pmu = (struct pmu) {
|
||||
.pmu_enable = armpmu_enable,
|
||||
.pmu_disable = armpmu_disable,
|
||||
.event_init = armpmu_event_init,
|
||||
.add = armpmu_add,
|
||||
.del = armpmu_del,
|
||||
.start = armpmu_start,
|
||||
.stop = armpmu_stop,
|
||||
.read = armpmu_read,
|
||||
.filter_match = armpmu_filter_match,
|
||||
.attr_groups = pmu->attr_groups,
|
||||
/*
|
||||
* This is a CPU PMU potentially in a heterogeneous
|
||||
* configuration (e.g. big.LITTLE). This is not an uncore PMU,
|
||||
* and we have taken ctx sharing into account (e.g. with our
|
||||
* pmu::filter_match callback and pmu::event_init group
|
||||
* validation).
|
||||
*/
|
||||
.capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
|
||||
};
|
||||
|
||||
pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
|
||||
&armpmu_common_attr_group;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct pmu_hw_events *events;
|
||||
|
||||
events = per_cpu_ptr(pmu->hw_events, cpu);
|
||||
raw_spin_lock_init(&events->pmu_lock);
|
||||
events->percpu_pmu = pmu;
|
||||
}
|
||||
|
||||
return pmu;
|
||||
|
||||
out_free_pmu:
|
||||
kfree(pmu);
|
||||
out:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void armpmu_free(struct arm_pmu *pmu)
|
||||
{
|
||||
free_percpu(pmu->hw_events);
|
||||
kfree(pmu);
|
||||
}
|
||||
|
||||
int armpmu_register(struct arm_pmu *pmu)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = cpu_pmu_init(pmu);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
return ret;
|
||||
|
||||
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
|
||||
if (ret)
|
||||
@ -1066,17 +860,12 @@ int arm_pmu_device_probe(struct platform_device *pdev,
|
||||
__oprofile_cpu_pmu = pmu;
|
||||
|
||||
pr_info("enabled with %s PMU driver, %d counters available\n",
|
||||
pmu->name, pmu->num_events);
|
||||
pmu->name, pmu->num_events);
|
||||
|
||||
return 0;
|
||||
|
||||
out_destroy:
|
||||
cpu_pmu_destroy(pmu);
|
||||
out_free:
|
||||
pr_info("%s: failed to register PMU devices!\n",
|
||||
of_node_full_name(node));
|
||||
kfree(pmu->irq_affinity);
|
||||
kfree(pmu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1086,7 +875,8 @@ static int arm_pmu_hp_init(void)
|
||||
|
||||
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
|
||||
"perf/arm/pmu:starting",
|
||||
arm_perf_starting_cpu, NULL);
|
||||
arm_perf_starting_cpu,
|
||||
arm_perf_teardown_cpu);
|
||||
if (ret)
|
||||
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
|
||||
ret);
|
||||
|
drivers/perf/arm_pmu_acpi.c (new file, 256 lines)
@@ -0,0 +1,256 @@
|
||||
/*
|
||||
* ACPI probing code for ARM performance counters.
|
||||
*
|
||||
* Copyright (C) 2017 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/perf/arm_pmu.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
|
||||
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
|
||||
static DEFINE_PER_CPU(int, pmu_irqs);
|
||||
|
||||
static int arm_pmu_acpi_register_irq(int cpu)
|
||||
{
|
||||
struct acpi_madt_generic_interrupt *gicc;
|
||||
int gsi, trigger;
|
||||
|
||||
gicc = acpi_cpu_get_madt_gicc(cpu);
|
||||
if (WARN_ON(!gicc))
|
||||
return -EINVAL;
|
||||
|
||||
gsi = gicc->performance_interrupt;
|
||||
if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
|
||||
trigger = ACPI_EDGE_SENSITIVE;
|
||||
else
|
||||
trigger = ACPI_LEVEL_SENSITIVE;
|
||||
|
||||
/*
|
||||
* Helpfully, the MADT GICC doesn't have a polarity flag for the
|
||||
* "performance interrupt". Luckily, on compliant GICs the polarity is
|
||||
* a fixed value in HW (for both SPIs and PPIs) that we cannot change
|
||||
* from SW.
|
||||
*
|
||||
* Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
|
||||
* may not match the real polarity, but that should not matter.
|
||||
*
|
||||
* Other interrupt controllers are not supported with ACPI.
|
||||
*/
|
||||
return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
|
||||
}
|
||||
|
||||
static void arm_pmu_acpi_unregister_irq(int cpu)
|
||||
{
|
||||
struct acpi_madt_generic_interrupt *gicc;
|
||||
int gsi;
|
||||
|
||||
gicc = acpi_cpu_get_madt_gicc(cpu);
|
||||
if (!gicc)
|
||||
return;
|
||||
|
||||
gsi = gicc->performance_interrupt;
|
||||
acpi_unregister_gsi(gsi);
|
||||
}
|
||||
|
||||
static int arm_pmu_acpi_parse_irqs(void)
|
||||
{
|
||||
int irq, cpu, irq_cpu, err;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
irq = arm_pmu_acpi_register_irq(cpu);
|
||||
if (irq < 0) {
|
||||
err = irq;
|
||||
pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
|
||||
cpu, err);
|
||||
goto out_err;
|
||||
} else if (irq == 0) {
|
||||
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
|
||||
}
|
||||
|
||||
per_cpu(pmu_irqs, cpu) = irq;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
for_each_possible_cpu(cpu) {
|
||||
irq = per_cpu(pmu_irqs, cpu);
|
||||
if (!irq)
|
||||
continue;
|
||||
|
||||
arm_pmu_acpi_unregister_irq(cpu);
|
||||
|
||||
/*
|
||||
* Blat all copies of the IRQ so that we only unregister the
|
||||
* corresponding GSI once (e.g. when we have PPIs).
|
||||
*/
|
||||
for_each_possible_cpu(irq_cpu) {
|
||||
if (per_cpu(pmu_irqs, irq_cpu) == irq)
|
||||
per_cpu(pmu_irqs, irq_cpu) = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
|
||||
{
|
||||
unsigned long cpuid = read_cpuid_id();
|
||||
struct arm_pmu *pmu;
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
pmu = per_cpu(probed_pmus, cpu);
|
||||
if (!pmu || pmu->acpi_cpuid != cpuid)
|
||||
continue;
|
||||
|
||||
return pmu;
|
||||
}
|
||||
|
||||
pmu = armpmu_alloc();
|
||||
if (!pmu) {
|
||||
pr_warn("Unable to allocate PMU for CPU%d\n",
|
||||
smp_processor_id());
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pmu->acpi_cpuid = cpuid;
|
||||
|
||||
return pmu;
|
||||
}
|
||||
|
||||
/*
|
||||
* This must run before the common arm_pmu hotplug logic, so that we can
|
||||
* associate a CPU and its interrupt before the common code tries to manage the
|
||||
* affinity and so on.
|
||||
*
|
||||
* Note that hotplug events are serialized, so we cannot race with another CPU
|
||||
* coming up. The perf core won't open events while a hotplug event is in
|
||||
* progress.
|
||||
*/
|
||||
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
|
||||
{
|
||||
struct arm_pmu *pmu;
|
||||
struct pmu_hw_events __percpu *hw_events;
|
||||
int irq;
|
||||
|
||||
/* If we've already probed this CPU, we have nothing to do */
|
||||
if (per_cpu(probed_pmus, cpu))
|
||||
return 0;
|
||||
|
||||
irq = per_cpu(pmu_irqs, cpu);
|
||||
|
||||
pmu = arm_pmu_acpi_find_alloc_pmu();
|
||||
if (!pmu)
|
||||
return -ENOMEM;
|
||||
|
||||
cpumask_set_cpu(cpu, &pmu->supported_cpus);
|
||||
|
||||
per_cpu(probed_pmus, cpu) = pmu;
|
||||
|
||||
/*
|
||||
* Log and request the IRQ so the core arm_pmu code can manage it. In
|
||||
* some situations (e.g. mismatched PPIs), we may fail to request the
|
||||
* IRQ. However, it may be too late for us to do anything about it.
|
||||
* The common ARM PMU code will log a warning in this case.
|
||||
*/
|
||||
hw_events = pmu->hw_events;
|
||||
per_cpu(hw_events->irq, cpu) = irq;
|
||||
armpmu_request_irq(pmu, cpu);
|
||||
|
||||
/*
|
||||
* Ideally, we'd probe the PMU here when we find the first matching
|
||||
* CPU. We can't do that for several reasons; see the comment in
|
||||
* arm_pmu_acpi_init().
|
||||
*
|
||||
* So for the time being, we're done.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
|
||||
{
|
||||
int pmu_idx = 0;
|
||||
int cpu, ret;
|
||||
|
||||
if (acpi_disabled)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Initialise and register the set of PMUs which we know about right
|
||||
* now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
|
||||
* could handle late hotplug, but this may lead to deadlock since we
|
||||
* might try to register a hotplug notifier instance from within a
|
||||
* hotplug notifier.
|
||||
*
|
||||
* There's also the problem of having access to the right init_fn,
|
||||
* without tying this too deeply into the "real" PMU driver.
|
||||
*
|
||||
* For the moment, as with the platform/DT case, we need at least one
|
||||
* of a PMU's CPUs to be online at probe time.
|
||||
*/
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
|
||||
char *base_name;
|
||||
|
||||
if (!pmu || pmu->name)
|
||||
continue;
|
||||
|
||||
ret = init_fn(pmu);
|
||||
if (ret == -ENODEV) {
|
||||
/* PMU not handled by this driver, or not present */
|
||||
continue;
|
||||
} else if (ret) {
|
||||
pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
base_name = pmu->name;
|
||||
pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
|
||||
if (!pmu->name) {
|
||||
pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = armpmu_register(pmu);
|
||||
if (ret) {
|
||||
pr_warn("Failed to register PMU for CPU%d\n", cpu);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arm_pmu_acpi_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (acpi_disabled)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We can't request IRQs yet, since we don't know the cookie value
|
||||
* until we know which CPUs share the same logical PMU. We'll handle
|
||||
* that in arm_pmu_acpi_cpu_starting().
|
||||
*/
|
||||
ret = arm_pmu_acpi_parse_irqs();
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
|
||||
"perf/arm/pmu_acpi:starting",
|
||||
arm_pmu_acpi_cpu_starting, NULL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
subsys_initcall(arm_pmu_acpi_init)
|
drivers/perf/arm_pmu_platform.c (new file, 235 lines)
@@ -0,0 +1,235 @@
|
||||
/*
|
||||
* platform_device probing code for ARM performance counters.
|
||||
*
|
||||
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
|
||||
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
|
||||
*/
|
||||
#define pr_fmt(fmt) "hw perfevents: " fmt
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdesc.h>
|
||||
#include <linux/kconfig.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/perf/arm_pmu.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
static int probe_current_pmu(struct arm_pmu *pmu,
|
||||
const struct pmu_probe_info *info)
|
||||
{
|
||||
int cpu = get_cpu();
|
||||
unsigned int cpuid = read_cpuid_id();
|
||||
int ret = -ENODEV;
|
||||
|
||||
pr_info("probing PMU on CPU %d\n", cpu);
|
||||
|
||||
for (; info->init != NULL; info++) {
|
||||
if ((cpuid & info->mask) != info->cpuid)
|
||||
continue;
|
||||
ret = info->init(pmu);
|
||||
break;
|
||||
}
|
||||
|
||||
put_cpu();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
|
||||
{
|
||||
int cpu, ret;
|
||||
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
|
||||
|
||||
ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for_each_cpu(cpu, &pmu->supported_cpus)
|
||||
per_cpu(hw_events->irq, cpu) = irq;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool pmu_has_irq_affinity(struct device_node *node)
|
||||
{
|
||||
return !!of_find_property(node, "interrupt-affinity", NULL);
|
||||
}
|
||||
|
||||
static int pmu_parse_irq_affinity(struct device_node *node, int i)
|
||||
{
|
||||
struct device_node *dn;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* If we don't have an interrupt-affinity property, we guess irq
|
||||
* affinity matches our logical CPU order, as we used to assume.
|
||||
* This is fragile, so we'll warn in pmu_parse_irqs().
|
||||
*/
|
||||
if (!pmu_has_irq_affinity(node))
|
||||
return i;
|
||||
|
||||
dn = of_parse_phandle(node, "interrupt-affinity", i);
|
||||
if (!dn) {
|
||||
pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
|
||||
i, node->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Now look up the logical CPU number */
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct device_node *cpu_dn;
|
||||
|
||||
cpu_dn = of_cpu_device_node_get(cpu);
|
||||
of_node_put(cpu_dn);
|
||||
|
||||
if (dn == cpu_dn)
|
||||
break;
|
||||
}
|
||||
|
||||
if (cpu >= nr_cpu_ids) {
|
||||
pr_warn("failed to find logical CPU for %s\n", dn->name);
|
||||
}
|
||||
|
||||
of_node_put(dn);
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
static int pmu_parse_irqs(struct arm_pmu *pmu)
|
||||
{
|
||||
int i = 0, num_irqs;
|
||||
struct platform_device *pdev = pmu->plat_device;
|
||||
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
|
||||
|
||||
num_irqs = platform_irq_count(pdev);
|
||||
if (num_irqs < 0) {
|
||||
pr_err("unable to count PMU IRQs\n");
|
||||
return num_irqs;
|
||||
}
|
||||
|
||||
/*
|
||||
* In this case we have no idea which CPUs are covered by the PMU.
|
||||
* To match our prior behaviour, we assume all CPUs in this case.
|
||||
*/
|
||||
if (num_irqs == 0) {
|
||||
pr_warn("no irqs for PMU, sampling events not supported\n");
|
||||
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
|
||||
cpumask_setall(&pmu->supported_cpus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (num_irqs == 1) {
|
||||
int irq = platform_get_irq(pdev, 0);
|
||||
if (irq && irq_is_percpu(irq))
|
||||
return pmu_parse_percpu_irq(pmu, irq);
|
||||
}
|
||||
|
||||
if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
|
||||
pr_warn("no interrupt-affinity property for %s, guessing.\n",
|
||||
of_node_full_name(pdev->dev.of_node));
|
||||
}
|
||||
|
||||
/*
|
||||
* Some platforms have all PMU IRQs OR'd into a single IRQ, with a
|
||||
* special platdata function that attempts to demux them.
|
||||
*/
|
||||
if (dev_get_platdata(&pdev->dev))
|
||||
cpumask_setall(&pmu->supported_cpus);
|
||||
|
||||
for (i = 0; i < num_irqs; i++) {
|
||||
int cpu, irq;
|
||||
|
||||
irq = platform_get_irq(pdev, i);
|
||||
if (WARN_ON(irq <= 0))
|
||||
continue;
|
||||
|
||||
if (irq_is_percpu(irq)) {
|
||||
pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
|
||||
if (cpu < 0)
|
||||
return cpu;
|
||||
if (cpu >= nr_cpu_ids)
|
||||
continue;
|
||||
|
||||
if (per_cpu(hw_events->irq, cpu)) {
|
||||
pr_warn("multiple PMU IRQs for the same CPU detected\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
per_cpu(hw_events->irq, cpu) = irq;
|
||||
cpumask_set_cpu(cpu, &pmu->supported_cpus);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arm_pmu_device_probe(struct platform_device *pdev,
|
||||
const struct of_device_id *of_table,
|
||||
const struct pmu_probe_info *probe_table)
|
||||
{
|
||||
const struct of_device_id *of_id;
|
||||
armpmu_init_fn init_fn;
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct arm_pmu *pmu;
|
||||
int ret = -ENODEV;
|
||||
|
||||
pmu = armpmu_alloc();
|
||||
if (!pmu)
|
||||
return -ENOMEM;
|
||||
|
||||
pmu->plat_device = pdev;
|
||||
|
||||
ret = pmu_parse_irqs(pmu);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
|
||||
init_fn = of_id->data;
|
||||
|
||||
pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
|
||||
"secure-reg-access");
|
||||
|
||||
/* arm64 systems boot only as non-secure */
|
||||
if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
|
||||
pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
|
||||
pmu->secure_access = false;
|
||||
}
|
||||
|
||||
ret = init_fn(pmu);
|
||||
} else if (probe_table) {
|
||||
cpumask_setall(&pmu->supported_cpus);
|
||||
ret = probe_current_pmu(pmu, probe_table);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
ret = armpmu_request_irqs(pmu);
|
||||
if (ret)
|
||||
goto out_free_irqs;
|
||||
|
||||
ret = armpmu_register(pmu);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_irqs:
|
||||
armpmu_free_irqs(pmu);
|
||||
out_free:
|
||||
pr_info("%s: failed to register PMU devices!\n",
|
||||
of_node_full_name(node));
|
||||
armpmu_free(pmu);
|
||||
return ret;
|
||||
}
|
drivers/perf/qcom_l3_pmu.c (new file, 849 lines)
@@ -0,0 +1,849 @@
|
||||
/*
|
||||
* Driver for the L3 cache PMUs in Qualcomm Technologies chips.
|
||||
*
|
||||
* The driver supports a distributed cache architecture where the overall
|
||||
* cache for a socket is comprised of multiple slices each with its own PMU.
|
||||
* Access to each individual PMU is provided even though all CPUs share all
|
||||
* the slices. User space needs to aggregate to individual counts to provide
|
||||
* a global picture.
|
||||
*
|
||||
* See Documentation/perf/qcom_l3_pmu.txt for more details.
|
||||
*
|
||||
* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
/*
|
||||
* General constants
|
||||
*/
|
||||
|
||||
/* Number of counters on each PMU */
|
||||
#define L3_NUM_COUNTERS 8
|
||||
/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
|
||||
#define L3_EVTYPE_MASK 0xFF
|
||||
/*
|
||||
* Bit position of the 'long counter' flag within perf_event_attr.config.
|
||||
* Reserve some space between the event type and this flag to allow expansion
|
||||
* in the event type field.
|
||||
*/
|
||||
#define L3_EVENT_LC_BIT 32
|
||||
|
||||
/*
|
||||
* Register offsets
|
||||
*/
|
||||
|
||||
/* Perfmon registers */
|
||||
#define L3_HML3_PM_CR 0x000
|
||||
#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8)
|
||||
#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8)
|
||||
#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8)
|
||||
#define L3_HML3_PM_FILTRA 0x300
|
||||
#define L3_HML3_PM_FILTRB 0x308
|
||||
#define L3_HML3_PM_FILTRC 0x310
|
||||
#define L3_HML3_PM_FILTRAM 0x304
|
||||
#define L3_HML3_PM_FILTRBM 0x30C
|
||||
#define L3_HML3_PM_FILTRCM 0x314
|
||||
|
||||
/* Basic counter registers */
|
||||
#define L3_M_BC_CR 0x500
|
||||
#define L3_M_BC_SATROLL_CR 0x504
|
||||
#define L3_M_BC_CNTENSET 0x508
|
||||
#define L3_M_BC_CNTENCLR 0x50C
|
||||
#define L3_M_BC_INTENSET 0x510
|
||||
#define L3_M_BC_INTENCLR 0x514
|
||||
#define L3_M_BC_GANG 0x718
|
||||
#define L3_M_BC_OVSR 0x740
|
||||
#define L3_M_BC_IRQCTL 0x96C
|
||||
|
||||
/*
|
||||
* Bit field definitions
|
||||
*/
|
||||
|
||||
/* L3_HML3_PM_CR */
|
||||
#define PM_CR_RESET (0)
|
||||
|
||||
/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */
|
||||
#define PMCNT_RESET (0)
|
||||
|
||||
/* L3_HML3_PM_EVTYPEx */
|
||||
#define EVSEL(__val) ((__val) & L3_EVTYPE_MASK)
|
||||
|
||||
/* Reset value for all the filter registers */
|
||||
#define PM_FLTR_RESET (0)
|
||||
|
||||
/* L3_M_BC_CR */
|
||||
#define BC_RESET (1UL << 1)
|
||||
#define BC_ENABLE (1UL << 0)
|
||||
|
||||
/* L3_M_BC_SATROLL_CR */
|
||||
#define BC_SATROLL_CR_RESET (0)
|
||||
|
||||
/* L3_M_BC_CNTENSET */
|
||||
#define PMCNTENSET(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
|
||||
/* L3_M_BC_CNTENCLR */
|
||||
#define PMCNTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
#define BC_CNTENCLR_RESET (0xFF)
|
||||
|
||||
/* L3_M_BC_INTENSET */
|
||||
#define PMINTENSET(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
|
||||
/* L3_M_BC_INTENCLR */
|
||||
#define PMINTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
#define BC_INTENCLR_RESET (0xFF)
|
||||
|
||||
/* L3_M_BC_GANG */
|
||||
#define GANG_EN(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
#define BC_GANG_RESET (0)
|
||||
|
||||
/* L3_M_BC_OVSR */
|
||||
#define PMOVSRCLR(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
#define PMOVSRCLR_RESET (0xFF)
|
||||
|
||||
/* L3_M_BC_IRQCTL */
|
||||
#define PMIRQONMSBEN(__cntr) (1UL << ((__cntr) & 0x7))
|
||||
#define BC_IRQCTL_RESET (0x0)
|
||||
|
||||
/*
|
||||
* Events
|
||||
*/
|
||||
|
||||
#define L3_EVENT_CYCLES 0x01
|
||||
#define L3_EVENT_READ_HIT 0x20
|
||||
#define L3_EVENT_READ_MISS 0x21
|
||||
#define L3_EVENT_READ_HIT_D 0x22
|
||||
#define L3_EVENT_READ_MISS_D 0x23
|
||||
#define L3_EVENT_WRITE_HIT 0x24
|
||||
#define L3_EVENT_WRITE_MISS 0x25
|
||||
|
||||
/*
|
||||
* Decoding of settings from perf_event_attr
|
||||
*
|
||||
* The config format for perf events is:
|
||||
* - config: bits 0-7: event type
|
||||
* bit 32: HW counter size requested, 0: 32 bits, 1: 64 bits
|
||||
*/
|
||||
|
||||
static inline u32 get_event_type(struct perf_event *event)
|
||||
{
|
||||
return (event->attr.config) & L3_EVTYPE_MASK;
|
||||
}
|
||||
|
||||
static inline bool event_uses_long_counter(struct perf_event *event)
|
||||
{
|
||||
return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT));
|
||||
}
|
||||
|
||||
static inline int event_num_counters(struct perf_event *event)
|
||||
{
|
||||
return event_uses_long_counter(event) ? 2 : 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Main PMU, inherits from the core perf PMU type
|
||||
*/
|
||||
struct l3cache_pmu {
|
||||
struct pmu pmu;
|
||||
struct hlist_node node;
|
||||
void __iomem *regs;
|
||||
struct perf_event *events[L3_NUM_COUNTERS];
|
||||
unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)];
|
||||
cpumask_t cpumask;
|
||||
};
|
||||
|
||||
#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))
|
||||
|
||||
/*
|
||||
* Type used to group hardware counter operations
|
||||
*
|
||||
* Used to implement two types of hardware counters, standard (32bits) and
|
||||
* long (64bits). The hardware supports counter chaining which we use to
|
||||
* implement long counters. This support is exposed via the 'lc' flag field
|
||||
* in perf_event_attr.config.
|
||||
*/
|
||||
struct l3cache_event_ops {
|
||||
/* Called to start event monitoring */
|
||||
void (*start)(struct perf_event *event);
|
||||
/* Called to stop event monitoring */
|
||||
void (*stop)(struct perf_event *event, int flags);
|
||||
/* Called to update the perf_event */
|
||||
void (*update)(struct perf_event *event);
|
||||
};
|
||||
|
||||
/*
|
||||
* Implementation of long counter operations
|
||||
*
|
||||
* 64bit counters are implemented by chaining two of the 32bit physical
|
||||
* counters. The PMU only supports chaining of adjacent even/odd pairs
|
||||
* and for simplicity the driver always configures the odd counter to
|
||||
* count the overflows of the lower-numbered even counter. Note that since
|
||||
* the resulting hardware counter is 64bits no IRQs are required to maintain
|
||||
* the software counter which is also 64bits.
|
||||
*/
|
||||
|
||||
static void qcom_l3_cache__64bit_counter_start(struct perf_event *event)
|
||||
{
|
||||
struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
|
||||
int idx = event->hw.idx;
|
||||
u32 evsel = get_event_type(event);
|
||||
u32 gang;
|
||||
|
||||
/* Set the odd counter to count the overflows of the even counter */
|
||||
gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
|
||||
gang |= GANG_EN(idx + 1);
|
||||
writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG);
|
||||

	/* Initialize the hardware counters and reset prev_count */
	local64_set(&event->hw.prev_count, 0);
	writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
	writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));

	/*
	 * Set the event types, the upper half must use zero and the lower
	 * half the actual event type
	 */
	writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1));
	writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));

	/* Finally, enable the counters */
	writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1));
	writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET);
	writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
	writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
}

static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event,
					      int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);

	/* Disable the counters */
	writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
	writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR);

	/* Disable chaining */
	writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG);
}

static void qcom_l3_cache__64bit_counter_update(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 hi, lo;
	u64 prev, new;
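
	/*
	 * The two 32-bit halves cannot be read atomically, so re-read the
	 * high word until it is stable to obtain a consistent 64-bit
	 * snapshot; the outer loop retries if another updater changed
	 * prev_count in the meantime.
	 */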
	do {
		prev = local64_read(&event->hw.prev_count);
		do {
			hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
			lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
		} while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)));
		new = ((u64)hi << 32) | lo;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	local64_add(new - prev, &event->count);
}

static const struct l3cache_event_ops event_ops_long = {
	.start = qcom_l3_cache__64bit_counter_start,
	.stop = qcom_l3_cache__64bit_counter_stop,
	.update = qcom_l3_cache__64bit_counter_update,
};

/*
 * Implementation of standard counter operations
 *
 * 32bit counters use a single physical counter and a hardware feature that
 * asserts the overflow IRQ on the toggling of the most significant bit in
 * the counter. This feature allows the counters to be left free-running
 * without needing the usual reprogramming required to properly handle races
 * during concurrent calls to update.
 */

static void qcom_l3_cache__32bit_counter_start(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 evsel = get_event_type(event);
	u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);

	/* Set the counter to assert the overflow IRQ on MSB toggling */
	writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);

	/* Initialize the hardware counter and reset prev_count */
	local64_set(&event->hw.prev_count, 0);
	writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));

	/* Set the event type */
	writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));

	/* Enable interrupt generation by this counter */
	writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET);

	/* Finally, enable the counter */
	writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
	writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
}

static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event,
					      int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);

	/* Disable the counter */
	writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);

	/* Disable interrupt generation by this counter */
	writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR);

	/* Set the counter to not assert the overflow IRQ on MSB toggling */
	writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
}

static void qcom_l3_cache__32bit_counter_update(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	int idx = event->hw.idx;
	u32 prev, new;
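
	/*
	 * Because the overflow IRQ fires when the MSB toggles, the counter
	 * is read at least once per half-period, so the unsigned 32-bit
	 * subtraction below remains correct even when the hardware counter
	 * wraps around.
	 */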
	do {
		prev = local64_read(&event->hw.prev_count);
		new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	local64_add(new - prev, &event->count);
}

static const struct l3cache_event_ops event_ops_std = {
	.start = qcom_l3_cache__32bit_counter_start,
	.stop = qcom_l3_cache__32bit_counter_stop,
	.update = qcom_l3_cache__32bit_counter_update,
};

/* Retrieve the appropriate operations for the given event */
static
const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event)
{
	if (event_uses_long_counter(event))
		return &event_ops_long;
	else
		return &event_ops_std;
}

/*
 * Top level PMU functions.
 */

static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu)
{
	int i;

	writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR);

	/*
	 * Use writel for the first programming command to ensure the basic
	 * counter unit is stopped before proceeding
	 */
	writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR);

	writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR);
	writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR);
	writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR);
	writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG);
	writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL);
	writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR);

	for (i = 0; i < L3_NUM_COUNTERS; ++i) {
		writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i));
		writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i));
	}

	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC);
	writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM);

	/*
	 * Use writel here to ensure all programming commands are done
	 * before proceeding
	 */
	writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
}

static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data)
{
	struct l3cache_pmu *l3pmu = data;
	/* Read the overflow status register */
	long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR);
	int idx;

	if (status == 0)
		return IRQ_NONE;

	/* Clear the bits we read on the overflow status register */
	writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR);

	for_each_set_bit(idx, &status, L3_NUM_COUNTERS) {
		struct perf_event *event;
		const struct l3cache_event_ops *ops;

		event = l3pmu->events[idx];
		if (!event)
			continue;

		/*
		 * Since the IRQ is not enabled for events using long counters
		 * we should never see one of those here; however, be consistent
		 * and use the ops indirection like in the other operations.
		 */

		ops = l3cache_event_get_ops(event);
		ops->update(event);
	}

	return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void qcom_l3_cache__pmu_enable(struct pmu *pmu)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);

	/* Ensure the other programming commands are observed before enabling */
	wmb();

	writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
}

static void qcom_l3_cache__pmu_disable(struct pmu *pmu)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);

	writel_relaxed(0, l3pmu->regs + L3_M_BC_CR);

	/* Ensure the basic counter unit is stopped before proceeding */
	wmb();
}

/*
 * We must NOT create groups containing events from multiple hardware PMUs,
 * although mixing different software and hardware PMUs is allowed.
 */
static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int counters = 0;

	if (leader->pmu != event->pmu && !is_software_event(leader))
		return false;

	counters = event_num_counters(event);
	counters += event_num_counters(leader);

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (is_software_event(sibling))
			continue;
		if (sibling->pmu != event->pmu)
			return false;
		counters += event_num_counters(sibling);
	}

	/*
	 * If the group requires more counters than the HW has, it
	 * cannot ever be scheduled.
	 */
	return counters <= L3_NUM_COUNTERS;
}

static int qcom_l3_cache__event_init(struct perf_event *event)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Is the event for this PMU?
	 */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * There are no per-counter mode filters in the PMU.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/*
	 * Sampling not supported since these events are not core-attributable.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Task mode not available; we run the counters as socket counters,
	 * not attributable to any CPU and therefore cannot attribute per-task.
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* Validate the group */
	if (!qcom_l3_cache__validate_event_group(event))
		return -EINVAL;

	hwc->idx = -1;

	/*
	 * Many perf core operations (e.g. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, like this one, where
	 * each event could be theoretically assigned to a different CPU.
	 * To mitigate this, we enforce CPU assignment to one designated
	 * processor (the one described in the "cpumask" attribute exported
	 * by the PMU device). perf user space tools honor this and avoid
	 * opening more than one copy of the events.
	 */
	event->cpu = cpumask_first(&l3pmu->cpumask);

	return 0;
}

static void qcom_l3_cache__event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);

	hwc->state = 0;
	ops->start(event);
}

static void qcom_l3_cache__event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);

	if (hwc->state & PERF_HES_STOPPED)
		return;

	ops->stop(event, flags);
	if (flags & PERF_EF_UPDATE)
		ops->update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int qcom_l3_cache__event_add(struct perf_event *event, int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int order = event_uses_long_counter(event) ? 1 : 0;
	int idx;

	/*
	 * Try to allocate a counter.
	 */
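	/*
	 * An allocation order of 1 reserves two naturally aligned counters,
	 * i.e. the adjacent even/odd pair that counter chaining requires,
	 * while order 0 reserves a single counter.
	 */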
	idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
	if (idx < 0)
		/* The counters are all in use. */
		return -EAGAIN;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	l3pmu->events[idx] = event;

	if (flags & PERF_EF_START)
		qcom_l3_cache__event_start(event, 0);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void qcom_l3_cache__event_del(struct perf_event *event, int flags)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int order = event_uses_long_counter(event) ? 1 : 0;

	/* Stop and clean up */
	qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE);
	l3pmu->events[hwc->idx] = NULL;
	bitmap_release_region(l3pmu->used_mask, hwc->idx, order);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);
}

static void qcom_l3_cache__event_read(struct perf_event *event)
{
	const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);

	ops->update(event);
}

/*
 * Add sysfs attributes
 *
 * We export:
 * - formats, used by perf user space and other tools to configure events
 * - events, used by perf user space and other tools to create events
 *   symbolically, e.g.:
 *     perf stat -a -e l3cache_0_0/event=read-miss/ ls
 *     perf stat -a -e l3cache_0_0/event=0x21/ ls
 * - cpumask, used by perf user space and other tools to know on which CPUs
 *   to open the events
 */

/* formats */

static ssize_t l3cache_pmu_format_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *) eattr->var);
}

#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct dev_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \
		  .var = (void *) _config, } \
	})[0].attr.attr)

static struct attribute *qcom_l3_cache_pmu_formats[] = {
	L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
	L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)),
	NULL,
};

static struct attribute_group qcom_l3_cache_pmu_format_group = {
	.name = "format",
	.attrs = qcom_l3_cache_pmu_formats,
};
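
/*
 * With these format strings, perf tools can resolve the 'event' and 'lc'
 * terms from sysfs; e.g. reading format/lc under this PMU's event_source
 * directory (such as /sys/bus/event_source/devices/l3cache_0_0/format/lc)
 * is expected to return "config:32", assuming L3_EVENT_LC_BIT is bit 32.
 */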

/* events */

static ssize_t l3cache_pmu_event_show(struct device *dev,
				      struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define L3CACHE_EVENT_ATTR(_name, _id) \
	(&((struct perf_pmu_events_attr[]) { \
		{ .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \
		  .id = _id, } \
	})[0].attr.attr)

static struct attribute *qcom_l3_cache_pmu_events[] = {
	L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
	L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
	L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
	L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
	L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
	L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
	L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
	NULL
};

static struct attribute_group qcom_l3_cache_pmu_events_group = {
	.name = "events",
	.attrs = qcom_l3_cache_pmu_events,
};

/* cpumask */

static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev,
					      struct device_attribute *attr, char *buf)
{
	struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
}

static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL);

static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
	.attrs = qcom_l3_cache_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = {
	&qcom_l3_cache_pmu_format_group,
	&qcom_l3_cache_pmu_events_group,
	&qcom_l3_cache_pmu_cpumask_attr_group,
	NULL,
};

/*
 * Probing functions and data.
 */

static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);

	/* If there is not a CPU/PMU association, pick this CPU */
	if (cpumask_empty(&l3pmu->cpumask))
		cpumask_set_cpu(cpu, &l3pmu->cpumask);

	return 0;
}
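
/*
 * If the CPU currently designated to handle this PMU's events goes
 * offline, migrate the events and the reader role to another online CPU.
 */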
static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
		return 0;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;
	perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
	cpumask_set_cpu(target, &l3pmu->cpumask);
	return 0;
}

static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
{
	struct l3cache_pmu *l3pmu;
	struct acpi_device *acpi_dev;
	struct resource *memrc;
	int ret;
	char *name;

	/* Initialize the PMU data structures */

	acpi_dev = ACPI_COMPANION(&pdev->dev);
	if (!acpi_dev)
		return -ENODEV;
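
	/*
	 * The PMU name is built from the ACPI _UID values of this device
	 * and its parent, which yields names such as l3cache_0_0 (the name
	 * used in the perf stat examples above).
	 */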
	l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
		      acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id);
	if (!l3pmu || !name)
		return -ENOMEM;

	l3pmu->pmu = (struct pmu) {
		.task_ctx_nr = perf_invalid_context,

		.pmu_enable = qcom_l3_cache__pmu_enable,
		.pmu_disable = qcom_l3_cache__pmu_disable,
		.event_init = qcom_l3_cache__event_init,
		.add = qcom_l3_cache__event_add,
		.del = qcom_l3_cache__event_del,
		.start = qcom_l3_cache__event_start,
		.stop = qcom_l3_cache__event_stop,
		.read = qcom_l3_cache__event_read,

		.attr_groups = qcom_l3_cache_pmu_attr_grps,
	};

	memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
	if (IS_ERR(l3pmu->regs)) {
		dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start);
		return PTR_ERR(l3pmu->regs);
	}

	qcom_l3_cache__init(l3pmu);

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0)
		return ret;

	ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0,
			       name, l3pmu);
	if (ret) {
		dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n",
			&memrc->start);
		return ret;
	}

	/* Add this instance to the list used by the offline callback */
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug", ret);
		return ret;
	}

	ret = perf_pmu_register(&l3pmu->pmu, name, -1);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret);
		return ret;
	}

	dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type);

	return 0;
}

static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = {
	{ "QCOM8081", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match);

static struct platform_driver qcom_l3_cache_pmu_driver = {
	.driver = {
		.name = "qcom-l3cache-pmu",
		.acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
	},
	.probe = qcom_l3_cache_pmu_probe,
};

static int __init register_qcom_l3_cache_pmu_driver(void)
{
	int ret;

	/* Install a hook to update the reader CPU in case it goes offline */
	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
				      "perf/qcom/l3cache:online",
				      qcom_l3_cache_pmu_online_cpu,
				      qcom_l3_cache_pmu_offline_cpu);
	if (ret)
		return ret;

	return platform_driver_register(&qcom_l3_cache_pmu_driver);
}
device_initcall(register_qcom_l3_cache_pmu_driver);

@ -94,6 +94,7 @@ enum cpuhp_state {
	CPUHP_AP_ARM_VFP_STARTING,
	CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
	CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
	CPUHP_AP_PERF_ARM_ACPI_STARTING,
	CPUHP_AP_PERF_ARM_STARTING,
	CPUHP_AP_ARM_L2X0_STARTING,
	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@ -137,6 +138,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_ARM_CCN_ONLINE,
	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_RCUTREE_ONLINE,
	CPUHP_AP_ONLINE_DYN,

@ -75,6 +75,8 @@ struct pmu_hw_events {
	 * already have to allocate this struct per cpu.
	 */
	struct arm_pmu *percpu_pmu;

	int irq;
};

enum armpmu_attr_groups {
@ -88,7 +90,6 @@ struct arm_pmu {
	struct pmu pmu;
	cpumask_t active_irqs;
	cpumask_t supported_cpus;
	int *irq_affinity;
	char *name;
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	void (*enable)(struct perf_event *event);
@ -104,12 +105,8 @@ struct arm_pmu {
	void (*start)(struct arm_pmu *);
	void (*stop)(struct arm_pmu *);
	void (*reset)(void *);
	int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void (*free_irq)(struct arm_pmu *);
	int (*map_event)(struct perf_event *event);
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	u64 max_period;
	bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
@ -120,6 +117,9 @@ struct arm_pmu {
	struct notifier_block cpu_pm_nb;
	/* the attr_groups array must be NULL-terminated */
	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];

	/* Only to be used by ACPI probing code */
	unsigned long acpi_cpuid;
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@ -135,10 +135,12 @@ int armpmu_map_event(struct perf_event *event,
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			u32 raw_event_mask);

typedef int (*armpmu_init_fn)(struct arm_pmu *);

struct pmu_probe_info {
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
	armpmu_init_fn init;
};

#define PMU_PROBE(_cpuid, _mask, _fn) \
@ -160,6 +162,21 @@ int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table);

#ifdef CONFIG_ACPI
int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
#else
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif

/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
int armpmu_request_irqs(struct arm_pmu *armpmu);
void armpmu_free_irqs(struct arm_pmu *armpmu);
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);

#define ARMV8_PMU_PDEV_NAME "armv8-pmu"

#endif /* CONFIG_ARM_PMU */