blob: 40d7dff8bc306db0c8a9ddaee92ddc097ee46179 [file] [log] [blame]
/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053015#include <linux/interrupt.h>
Mark Rutland0ce47082011-05-19 10:07:57 +010016#include <linux/perf_event.h>
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053017
/*
 * Types of PMUs that can be accessed directly and require mutual
 * exclusion between profiling tools.
 */
enum arm_pmu_type {
        ARM_PMU_DEVICE_CPU      = 0,    /* the CPU core's own PMU */
        ARM_NUM_PMU_DEVICES,            /* number of PMU device types */
};
26
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053027/*
28 * struct arm_pmu_platdata - ARM PMU platform data
29 *
Ming Leie0516a62011-03-02 15:00:08 +080030 * @handle_irq: an optional handler which will be called from the
31 * interrupt and passed the address of the low level handler,
32 * and can be used to implement any platform specific handling
33 * before or after calling it.
Jon Hunter7be29582012-05-31 13:05:20 -050034 * @runtime_resume: an optional handler which will be called by the
35 * runtime PM framework following a call to pm_runtime_get().
36 * Note that if pm_runtime_get() is called more than once in
37 * succession this handler will only be called once.
38 * @runtime_suspend: an optional handler which will be called by the
39 * runtime PM framework following a call to pm_runtime_put().
40 * Note that if pm_runtime_get() is called more than once in
41 * succession this handler will only be called following the
42 * final call to pm_runtime_put() that actually disables the
43 * hardware.
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053044 */
45struct arm_pmu_platdata {
46 irqreturn_t (*handle_irq)(int irq, void *dev,
47 irq_handler_t pmu_handler);
Jon Hunter7be29582012-05-31 13:05:20 -050048 int (*runtime_resume)(struct device *dev);
49 int (*runtime_suspend)(struct device *dev);
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053050};
51
Jamie Iles0f4f0672010-02-02 20:23:15 +010052#ifdef CONFIG_CPU_HAS_PMU
53
/**
 * reserve_pmu() - reserve the hardware performance counters
 *
 * Reserve the hardware performance counters in the system for exclusive
 * use by the caller.
 *
 * Returns 0 on success or -EBUSY if the lock is already held.
 */
extern int
reserve_pmu(enum arm_pmu_type type);
Jamie Iles0f4f0672010-02-02 20:23:15 +010062
/**
 * release_pmu() - Relinquish control of the performance counters
 *
 * Release the performance counters and allow someone else to use them.
 */
extern void
release_pmu(enum arm_pmu_type type);
Jamie Iles0f4f0672010-02-02 20:23:15 +010070
Jamie Iles0f4f0672010-02-02 20:23:15 +010071#else /* CONFIG_CPU_HAS_PMU */
72
Will Deacon49c006b2010-04-29 17:13:24 +010073#include <linux/err.h>
74
Will Deaconb0e89592011-07-26 22:10:28 +010075static inline int
Mark Rutland7fdd3c42011-08-12 10:42:48 +010076reserve_pmu(enum arm_pmu_type type)
Jamie Iles0f4f0672010-02-02 20:23:15 +010077{
Jamie Iles0f4f0672010-02-02 20:23:15 +010078 return -ENODEV;
79}
80
Will Deaconb0e89592011-07-26 22:10:28 +010081static inline void
82release_pmu(enum arm_pmu_type type) { }
Jamie Iles0f4f0672010-02-02 20:23:15 +010083
84#endif /* CONFIG_CPU_HAS_PMU */
85
Mark Rutland0ce47082011-05-19 10:07:57 +010086#ifdef CONFIG_HW_PERF_EVENTS
87
88/* The events for a given PMU register set. */
89struct pmu_hw_events {
90 /*
91 * The events that are active on the PMU for the given index.
92 */
93 struct perf_event **events;
94
95 /*
96 * A 1 bit for an index indicates that the counter is being used for
97 * an event. A 0 means that the counter can be used.
98 */
99 unsigned long *used_mask;
100
101 /*
102 * Hardware lock to serialize accesses to PMU registers. Needed for the
103 * read/modify/write sequences.
104 */
105 raw_spinlock_t pmu_lock;
106};
107
108struct arm_pmu {
109 struct pmu pmu;
Mark Rutland0ce47082011-05-19 10:07:57 +0100110 enum arm_pmu_type type;
111 cpumask_t active_irqs;
Will Deacon4295b892012-07-06 15:45:00 +0100112 char *name;
Mark Rutland0ce47082011-05-19 10:07:57 +0100113 irqreturn_t (*handle_irq)(int irq_num, void *dev);
114 void (*enable)(struct hw_perf_event *evt, int idx);
115 void (*disable)(struct hw_perf_event *evt, int idx);
116 int (*get_event_idx)(struct pmu_hw_events *hw_events,
117 struct hw_perf_event *hwc);
118 int (*set_event_filter)(struct hw_perf_event *evt,
119 struct perf_event_attr *attr);
120 u32 (*read_counter)(int idx);
121 void (*write_counter)(int idx, u32 val);
122 void (*start)(void);
123 void (*stop)(void);
124 void (*reset)(void *);
125 int (*map_event)(struct perf_event *event);
126 int num_events;
127 atomic_t active_events;
128 struct mutex reserve_mutex;
129 u64 max_period;
130 struct platform_device *plat_device;
131 struct pmu_hw_events *(*get_hw_events)(void);
132};
133
/* Convert an embedded struct pmu pointer back to its struct arm_pmu. */
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
135
136int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
137
138u64 armpmu_event_update(struct perf_event *event,
139 struct hw_perf_event *hwc,
Will Deacon57273472012-03-06 17:33:17 +0100140 int idx);
Mark Rutland0ce47082011-05-19 10:07:57 +0100141
142int armpmu_event_set_period(struct perf_event *event,
143 struct hw_perf_event *hwc,
144 int idx);
145
146#endif /* CONFIG_HW_PERF_EVENTS */
147
Jamie Iles0f4f0672010-02-02 20:23:15 +0100148#endif /* __ARM_PMU_H__ */