Perf: Let platforms decide IRQ request methods.

This is in preparation for adding support for the unicore A5
and dualcore A5, both of which have the same MIDR value.

Instead of adding extra parsing to the generic ARM perf_event file,
this patch moves the logic to the 'mach' directory, where target types
can be detected in an implementation-specific manner.

The default behavior is maintained for all other ARM targets.

Change-Id: I041937273dbbd0fa4c602cf89a2e0fee7f73342b
Signed-off-by: Ashwin Chaugule <ashwinc@codeaurora.org>
This commit is contained in:
Ashwin Chaugule 2012-10-29 16:30:05 -04:00 committed by Steve Kondik
parent 10e31c79fc
commit 0e53dc32b0
4 changed files with 85 additions and 51 deletions

View File

@ -32,6 +32,10 @@ enum arm_pmu_type {
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
* @request_pmu_irq: an optional handler in case the platform wants
* to use a percpu IRQ API call. e.g. request_percpu_irq
* @free_pmu_irq: an optional handler in case the platform wants
* to use a percpu IRQ API call. e.g. free_percpu_irq
* @enable_irq: an optional handler which will be called after
* request_irq and be used to handle some platform specific
* irq enablement
@ -42,6 +46,8 @@ enum arm_pmu_type {
struct arm_pmu_platdata {
	/* Platform wrapper invoked around the low-level PMU IRQ handler. */
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	/* Optional percpu-IRQ request hook, e.g. request_percpu_irq(). */
	int (*request_pmu_irq)(int irq, irq_handler_t *irq_h);
	/* Optional percpu-IRQ release hook, e.g. free_percpu_irq(). */
	void (*free_pmu_irq)(int irq);
	/* Optional per-IRQ enable/disable hooks called after request_irq(). */
	void (*enable_irq)(int irq);
	void (*disable_irq)(int irq);
};
@ -115,8 +121,8 @@ struct arm_pmu {
struct platform_device *plat_device;
u32 from_idle;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
int (*request_pmu_irq)(int irq, irq_handler_t *irq_h);
void (*free_pmu_irq)(int irq);
int (*request_pmu_irq)(int irq, irq_handler_t *irq_h);
void (*free_pmu_irq)(int irq);
void (*enable)(struct hw_perf_event *evt, int idx, int cpu);
void (*disable)(struct hw_perf_event *evt, int idx);
int (*get_event_idx)(struct pmu_hw_events *hw_events,

View File

@ -436,6 +436,16 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
else
handle_irq = armpmu->handle_irq;
if (plat && plat->request_pmu_irq)
armpmu->request_pmu_irq = plat->request_pmu_irq;
else
armpmu->request_pmu_irq = armpmu_generic_request_irq;
if (plat && plat->free_pmu_irq)
armpmu->free_pmu_irq = plat->free_pmu_irq;
else
armpmu->free_pmu_irq = armpmu_generic_free_irq;
irqs = min(pmu_device->num_resources, num_possible_cpus());
if (irqs < 1) {
pr_err("no irqs for PMUs defined\n");

View File

@ -520,53 +520,6 @@ static void krait_pmu_reset(void *info)
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
/* SMP cross-call target: enable the percpu PMU IRQ on the local CPU. */
static void enable_irq_callback(void *info)
{
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_EDGE_RISING);
}
/* SMP cross-call target: disable the percpu PMU IRQ on the local CPU. */
static void disable_irq_callback(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}
/*
 * Claim the PMU interrupt via the percpu IRQ API and enable it on every
 * online CPU.  &cpu_hw_events is the percpu cookie later handed back to
 * free_percpu_irq().  Returns 0 on success or a negative errno.
 *
 * NOTE(review): walks cpu_online_mask without explicit hotplug
 * protection — confirm callers hold the needed locks.
 */
static int
msm_request_irq(int irq, irq_handler_t *handle_irq)
{
	int ret;
	int target;

	ret = request_percpu_irq(irq, *handle_irq, "l1-armpmu",
			&cpu_hw_events);
	if (ret)
		return ret;

	for_each_cpu(target, cpu_online_mask)
		smp_call_function_single(target, enable_irq_callback,
				&irq, 1);

	return 0;
}
static void
msm_free_irq(int irq)
{
int cpu;
if (irq >= 0) {
for_each_cpu(cpu, cpu_online_mask) {
smp_call_function_single(cpu,
disable_irq_callback, &irq, 1);
}
free_percpu_irq(irq, &cpu_hw_events);
}
}
/*
* We check for column exclusion constraints here.
* Two events cant have same reg and same group.
@ -621,8 +574,6 @@ static int msm_clear_ev_constraint(struct perf_event *event)
static struct arm_pmu krait_pmu = {
.handle_irq = armv7pmu_handle_irq,
.request_pmu_irq = msm_request_irq,
.free_pmu_irq = msm_free_irq,
.enable = krait_pmu_enable_event,
.disable = krait_pmu_disable_event,
.read_counter = armv7pmu_read_counter,

View File

@ -11,8 +11,64 @@
*/
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <asm/pmu.h>
#include <mach/irqs.h>
#include <mach/socinfo.h>
#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP)
static DEFINE_PER_CPU(u32, pmu_irq_cookie);
/* Cross-call helper: enable the percpu PMU IRQ on the executing CPU. */
static void enable_irq_callback(void *info)
{
	enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_EDGE_RISING);
}
/* Cross-call helper: disable the percpu PMU IRQ on the executing CPU. */
static void disable_irq_callback(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}
/*
 * Multicore request_pmu_irq hook: claim the PMU interrupt through the
 * percpu IRQ API and turn it on for each online CPU.  &pmu_irq_cookie is
 * the percpu token shared with multicore_free_irq().  Returns 0 on
 * success or a negative errno.
 *
 * NOTE(review): iterates cpu_online_mask without explicit hotplug
 * protection — verify the caller's locking.
 */
static int
multicore_request_irq(int irq, irq_handler_t *handle_irq)
{
	int ret;
	int target;

	ret = request_percpu_irq(irq, *handle_irq, "l1-armpmu",
			&pmu_irq_cookie);
	if (ret)
		return ret;

	for_each_cpu(target, cpu_online_mask)
		smp_call_function_single(target, enable_irq_callback,
				&irq, 1);

	return 0;
}
static void
multicore_free_irq(int irq)
{
int cpu;
if (irq >= 0) {
for_each_cpu(cpu, cpu_online_mask) {
smp_call_function_single(cpu,
disable_irq_callback, &irq, 1);
}
free_percpu_irq(irq, &pmu_irq_cookie);
}
}
/*
 * Platform data installed for known-multicore targets so the ARM PMU
 * core uses the percpu IRQ request/free paths instead of the default
 * request_irq()/free_irq().
 */
static struct arm_pmu_platdata multicore_data = {
	.request_pmu_irq = multicore_request_irq,
	.free_pmu_irq = multicore_free_irq,
};
#endif
static struct resource cpu_pmu_resource[] = {
{
@ -47,6 +103,7 @@ static struct platform_device cpu_pmu_device = {
.num_resources = ARRAY_SIZE(cpu_pmu_resource),
};
static struct platform_device *pmu_devices[] = {
&cpu_pmu_device,
#ifdef CONFIG_CPU_HAS_L2_PMU
@ -56,6 +113,16 @@ static struct platform_device *pmu_devices[] = {
/*
 * Register the MSM PMU platform devices, first selecting the IRQ
 * request/free strategy for the CPU PMU.  Returns the result of
 * platform_add_devices().
 */
static int __init msm_pmu_init(void)
{
	/*
	 * For the targets we know are multicore, set the request/free IRQ
	 * handlers to call the percpu API.
	 * Defaults to the unicore API {request,free}_irq().
	 * See arch/arm/kernel/perf_event.c
	 */
#if defined(CONFIG_ARCH_MSM_KRAITMP) || defined(CONFIG_ARCH_MSM_SCORPIONMP)
	cpu_pmu_device.dev.platform_data = &multicore_data;
#endif
	return platform_add_devices(pmu_devices, ARRAY_SIZE(pmu_devices));
}