Perf: Toggle PMU IRQ when CPUs are hotplugged
When a CPU is hotplugged out while a perf session
is active, disarm the IRQ when the CPU is preparing
to die. This ensures that perf doesn't lock up when
it tries to free the IRQ of a hotplugged CPU.
Similarly, when a CPU comes online during a perf session,
enable the IRQ so that perf doesn't try to disable
an unarmed IRQ when it finishes.
Change-Id: Ic4e412e5f1effae0db34a3e4b5e7e5c65faed2a0
Signed-off-by: Ashwin Chaugule <ashwinc@codeaurora.org>
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d2e2e44..85b1bb3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -739,6 +739,36 @@
armpmu->type = ARM_PMU_DEVICE_CPU;
}
+static int cpu_has_active_perf(void)
+{
+ struct pmu_hw_events *hw_events;
+ int enabled;
+
+ if (!cpu_pmu)
+ return 0;
+
+ hw_events = cpu_pmu->get_hw_events();
+ enabled = bitmap_weight(hw_events->used_mask, cpu_pmu->num_events);
+
+ if (enabled)
+ /* Even one event's existence is good enough. */
+ return 1;
+
+ return 0;
+}
+
+void enable_irq_callback(void *info)
+{
+ int irq = *(unsigned int *)info;
+ enable_percpu_irq(irq, IRQ_TYPE_EDGE_RISING);
+}
+
+void disable_irq_callback(void *info)
+{
+ int irq = *(unsigned int *)info;
+ disable_percpu_irq(irq);
+}
+
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
@@ -748,12 +778,50 @@
static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
+ int irq;
+
+ if (cpu_has_active_perf()) {
+ switch ((action & ~CPU_TASKS_FROZEN)) {
+
+ case CPU_DOWN_PREPARE:
+ /*
+ * If this is on a multicore CPU, we need
+ * to disarm the PMU IRQ before disappearing.
+ */
+ if (cpu_pmu &&
+ cpu_pmu->plat_device->dev.platform_data) {
+ irq = platform_get_irq(cpu_pmu->plat_device, 1);
+ smp_call_function_single((int)hcpu,
+ disable_irq_callback, &irq, 1);
+ }
+ return NOTIFY_DONE;
+
+ case CPU_UP_PREPARE:
+ /*
+ * If this is on a multicore CPU, we need
+ * to arm the PMU IRQ before appearing.
+ */
+ if (cpu_pmu &&
+ cpu_pmu->plat_device->dev.platform_data) {
+ irq = platform_get_irq(cpu_pmu->plat_device, 1);
+ smp_call_function_single((int)hcpu,
+ enable_irq_callback, &irq, 1);
+ }
+ return NOTIFY_DONE;
+
+ case CPU_STARTING:
+ if (cpu_pmu && cpu_pmu->reset) {
+ cpu_pmu->reset(NULL);
+ return NOTIFY_OK;
+ }
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(NULL);
-
return NOTIFY_OK;
}
@@ -777,24 +845,6 @@
}
}
-static int cpu_has_active_perf(void)
-{
- struct pmu_hw_events *hw_events;
- int enabled;
-
- if (!cpu_pmu)
- return 0;
-
- hw_events = cpu_pmu->get_hw_events();
- enabled = bitmap_weight(hw_events->used_mask, cpu_pmu->num_events);
-
- if (enabled)
- /*Even one event's existence is good enough.*/
- return 1;
-
- return 0;
-}
-
static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
.notifier_call = pmu_cpu_notify,
};