Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 1 | /* |
| 2 | * linux/arch/arm/include/asm/pmu.h |
| 3 | * |
| 4 | * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | * |
| 10 | */ |
| 11 | |
| 12 | #ifndef __ARM_PMU_H__ |
| 13 | #define __ARM_PMU_H__ |
| 14 | |
Rabin Vincent | 0e25a5c | 2011-02-08 09:24:36 +0530 | [diff] [blame] | 15 | #include <linux/interrupt.h> |
Mark Rutland | 0ce4708 | 2011-05-19 10:07:57 +0100 | [diff] [blame] | 16 | #include <linux/perf_event.h> |
Rabin Vincent | 0e25a5c | 2011-02-08 09:24:36 +0530 | [diff] [blame] | 17 | |
/*
 * Types of PMUs that can be accessed directly and require mutual
 * exclusion between profiling tools.
 */
enum arm_pmu_type {
	ARM_PMU_DEVICE_CPU = 0,	/* the CPU's own performance counters */
	ARM_NUM_PMU_DEVICES,	/* number of PMU types above; must stay last */
};
| 26 | |
/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.  Should return the value the low
 *	level handler returns (an irqreturn_t).
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};
| 51 | |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 52 | #ifdef CONFIG_CPU_HAS_PMU |
| 53 | |
/**
 * reserve_pmu() - reserve the hardware performance counters
 * @type: which PMU device to reserve (see enum arm_pmu_type)
 *
 * Reserve the hardware performance counters in the system for exclusive use.
 * Returns 0 on success or -EBUSY if the lock is already held.
 */
extern int
reserve_pmu(enum arm_pmu_type type);
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 62 | |
/**
 * release_pmu() - Relinquish control of the performance counters
 * @type: which PMU device to release; should match a prior
 *	successful reserve_pmu() call
 *
 * Release the performance counters and allow someone else to use them.
 */
extern void
release_pmu(enum arm_pmu_type type);
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 70 | |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 71 | #else /* CONFIG_CPU_HAS_PMU */ |
| 72 | |
Will Deacon | 49c006b | 2010-04-29 17:13:24 +0100 | [diff] [blame] | 73 | #include <linux/err.h> |
| 74 | |
Will Deacon | b0e8959 | 2011-07-26 22:10:28 +0100 | [diff] [blame] | 75 | static inline int |
Mark Rutland | 7fdd3c4 | 2011-08-12 10:42:48 +0100 | [diff] [blame] | 76 | reserve_pmu(enum arm_pmu_type type) |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 77 | { |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 78 | return -ENODEV; |
| 79 | } |
| 80 | |
Will Deacon | b0e8959 | 2011-07-26 22:10:28 +0100 | [diff] [blame] | 81 | static inline void |
| 82 | release_pmu(enum arm_pmu_type type) { } |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 83 | |
| 84 | #endif /* CONFIG_CPU_HAS_PMU */ |
| 85 | |
Mark Rutland | 0ce4708 | 2011-05-19 10:07:57 +0100 | [diff] [blame] | 86 | #ifdef CONFIG_HW_PERF_EVENTS |
| 87 | |
/* The events for a given PMU register set (one counter bank). */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event **events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long *used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;
};
| 107 | |
/*
 * struct arm_pmu - ARM PMU implementation descriptor.
 *
 * Embeds the generic perf struct pmu and adds the per-implementation
 * hooks and state needed to drive the hardware counters.
 */
struct arm_pmu {
	/* Generic perf PMU; recover the container with to_arm_pmu(). */
	struct pmu pmu;
	/* Which PMU device this is (see enum arm_pmu_type). */
	enum arm_pmu_type type;
	/* CPUs for which a PMU interrupt is currently in use. */
	cpumask_t active_irqs;
	/* Human-readable name of this PMU implementation. */
	char *name;
	/* Low-level PMU interrupt handler. */
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	/* Enable/disable counting of @evt on hardware counter @idx. */
	void (*enable)(struct hw_perf_event *evt, int idx);
	void (*disable)(struct hw_perf_event *evt, int idx);
	/* Pick a counter index from @hw_events for @hwc; presumably
	 * returns a negative errno when none is free — confirm against
	 * the implementation. */
	int (*get_event_idx)(struct pmu_hw_events *hw_events,
			     struct hw_perf_event *hwc);
	/* Apply the filtering described by @attr to @evt. */
	int (*set_event_filter)(struct hw_perf_event *evt,
				struct perf_event_attr *attr);
	/* Read/write the raw value of hardware counter @idx. */
	u32 (*read_counter)(int idx);
	void (*write_counter)(int idx, u32 val);
	/* Start/stop the whole PMU. */
	void (*start)(void);
	void (*stop)(void);
	/* Reset callback; takes an opaque implementation argument. */
	void (*reset)(void *);
	/* Map a generic perf event onto an implementation-specific
	 * event encoding. */
	int (*map_event)(struct perf_event *event);
	/* Number of hardware counters provided by this PMU. */
	int num_events;
	/* Count of currently active events, guarded by reserve_mutex. */
	atomic_t active_events;
	struct mutex reserve_mutex;
	/* Maximum period a counter can count before overflowing. */
	u64 max_period;
	/* Platform device that described this PMU. */
	struct platform_device *plat_device;
	/* Return the pmu_hw_events bank for the current context. */
	struct pmu_hw_events *(*get_hw_events)(void);
};
| 133 | |
/* Recover the containing struct arm_pmu from its embedded struct pmu. */
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Register @armpmu with the perf core under @name and PMU @type. */
int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);

/*
 * NOTE(review): implemented elsewhere; presumably synchronises the perf
 * event count with hardware counter @idx and returns the updated total —
 * confirm against the implementation.
 */
u64 armpmu_event_update(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx);

/*
 * NOTE(review): presumably programs counter @idx for the event's next
 * sampling period — confirm against the implementation.
 */
int armpmu_event_set_period(struct perf_event *event,
			    struct hw_perf_event *hwc,
			    int idx);
| 145 | |
| 146 | #endif /* CONFIG_HW_PERF_EVENTS */ |
| 147 | |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 148 | #endif /* __ARM_PMU_H__ */ |