/*
 * linux/arch/arm/kernel/pmu.c
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/pmu.h>

/*
 * Define the IRQs for the system. We could use something like a platform
 * device but that seems fairly heavyweight for this. Also, the performance
 * counters can't be removed or hotplugged.
 *
 * Ordering is important: init_pmu() will use the ordering to set the affinity
 * to the corresponding core, e.g. the first interrupt goes to cpu 0, the
 * second to cpu 1, and so on.
 */
static const int irqs[] = {
#if defined(CONFIG_ARCH_OMAP2)
	3,
#elif defined(CONFIG_ARCH_BCMRING)
	IRQ_PMUIRQ,
#elif defined(CONFIG_MACH_REALVIEW_EB)
	IRQ_EB11MP_PMU_CPU0,
	IRQ_EB11MP_PMU_CPU1,
	IRQ_EB11MP_PMU_CPU2,
	IRQ_EB11MP_PMU_CPU3,
#elif defined(CONFIG_ARCH_OMAP3)
	INT_34XX_BENCH_MPU_EMUL,
#elif defined(CONFIG_ARCH_IOP32X)
	IRQ_IOP32X_CORE_PMU,
#elif defined(CONFIG_ARCH_IOP33X)
	IRQ_IOP33X_CORE_PMU,
#elif defined(CONFIG_ARCH_PXA)
	IRQ_PMU,
#endif
};

static const struct pmu_irqs pmu_irqs = {
	.irqs		= irqs,
	.num_irqs	= ARRAY_SIZE(irqs),
};

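/*
 * Reservation flag for the PMU interrupts: only one user may hold them at a
 * time, and bit 0 of pmu_lock records whether they are currently claimed.
 */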
static volatile long pmu_lock;

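/*
 * Claim exclusive use of the PMU interrupts. Returns the IRQ description
 * table on success, or ERR_PTR(-EBUSY) if another user already holds the
 * reservation.
 */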
const struct pmu_irqs *
reserve_pmu(void)
{
	return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
		&pmu_irqs;
}
EXPORT_SYMBOL_GPL(reserve_pmu);

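/*
 * Release a reservation obtained from reserve_pmu(). The caller must pass
 * back the same descriptor table it was handed.
 */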
int
release_pmu(const struct pmu_irqs *irqs)
{
	if (WARN_ON(irqs != &pmu_irqs))
		return -EINVAL;
	clear_bit_unlock(0, &pmu_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);

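/*
 * Route a single PMU interrupt to the given CPU. On uniprocessor builds
 * there is nothing to route, so this is a no-op.
 */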
static int
set_irq_affinity(int irq,
		 unsigned int cpu)
{
#ifdef CONFIG_SMP
	int err = irq_set_affinity(irq, cpumask_of(cpu));
	if (err)
		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
			   irq, cpu);
	return err;
#else
	return 0;
#endif
}

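/*
 * Bind each PMU interrupt to its corresponding CPU, relying on the ordering
 * of the irqs[] table: entry n is routed to CPU n.
 */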
int
init_pmu(void)
{
	int i, err = 0;

	for (i = 0; i < pmu_irqs.num_irqs; ++i) {
		err = set_irq_affinity(pmu_irqs.irqs[i], i);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(init_pmu);