/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;

static void wq_sync_buffer(void *);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

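/* Free each online CPU's sample buffer allocated by alloc_cpu_buffers(). */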
void free_cpu_buffers(void)
{
        int i;

        for_each_online_cpu(i)
                vfree(cpu_buffer[i].buffer);
}

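/*
 * Allocate one sample buffer per online CPU, placed on that CPU's
 * local node, and reset the per-buffer bookkeeping.
 */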
int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = fs_cpu_buffer_size;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
                        cpu_to_node(i));
                if (!b->buffer)
                        goto fail;

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->tail_pos = 0;
                b->head_pos = 0;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->cpu = i;
                INIT_WORK(&b->work, wq_sync_buffer, b);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

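/* Begin the periodic syncing of each CPU's buffer into the event buffer. */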
void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

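/* Stop the periodic syncing and wait for any work still in flight. */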
void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer * b = &cpu_buffer[i];

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}

/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
        /* reset these to invalid values; the next sample
         * collected will populate the buffer with proper
         * values to initialize the buffer
         */
        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
}

/* Compute the number of available slots in the cpu_buffer queue.
 * One slot is always left unused so that a completely full buffer
 * can be distinguished from an empty one (head == tail). */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        if (tail > head)
                return (tail - head) - 1;

        return tail + (b->buffer_size - head) - 1;
}

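/* Advance the writer's head position by one slot, wrapping to the start. */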
static void increment_head(struct oprofile_cpu_buffer * b)
{
        unsigned long new_head = b->head_pos + 1;

        /* Ensure anything written to the slot before we
         * increment is visible */
        wmb();

        if (new_head < b->buffer_size)
                b->head_pos = new_head;
        else
                b->head_pos = 0;
}

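/* Write one PC/event pair at the head slot and advance the head. */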
static inline void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
           unsigned long pc, unsigned long event)
{
        struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
        entry->eip = pc;
        entry->event = event;
        increment_head(cpu_buf);
}

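/*
 * An entry with eip == ESCAPE_CODE is a control record; its event
 * field carries the code's value (kernel mode flag, task pointer,
 * or CPU_TRACE_BEGIN).
 */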
static inline void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
        add_sample(buffer, ESCAPE_CODE, value);
}

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
                      int is_kernel, unsigned long event)
{
        struct task_struct * task;

        cpu_buf->sample_received++;

        /* We may need up to three slots: a kernel enter/exit code,
         * a task switch code, and the sample itself. */
        if (nr_available_slots(cpu_buf) < 3) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        is_kernel = !!is_kernel;

        task = current;

        /* notice a switch from user->kernel or vice versa */
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                add_code(cpu_buf, is_kernel);
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                add_code(cpu_buf, (unsigned long)task);
        }

        add_sample(cpu_buf, pc, event);
        return 1;
}

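/*
 * Mark the buffer as tracing. Four slots are reserved: one for the
 * CPU_TRACE_BEGIN code plus the three a worst-case log_sample()
 * may consume.
 */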
static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        if (nr_available_slots(cpu_buf) < 4) {
                cpu_buf->sample_lost_overflow++;
                return 0;
        }

        add_code(cpu_buf, CPU_TRACE_BEGIN);
        cpu_buf->tracing = 1;
        return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
        cpu_buf->tracing = 0;
}

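/*
 * Log a sample and, if backtrace_depth is set, follow it with a
 * backtrace collected by the architecture's backtrace callback.
 */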
void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
                return;
        }

        if (!oprofile_begin_trace(cpu_buf))
                return;

        /* if log_sample() fails we can't backtrace since we lost the
         * source of this event */
        if (log_sample(cpu_buf, pc, is_kernel, event))
                oprofile_ops.backtrace(regs, backtrace_depth);
        oprofile_end_trace(cpu_buf);
}

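/* Log a sample whose PC and kernel/user mode are derived from regs. */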
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

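/* Log a sample for callers that already know the PC and mode,
 * e.g. when no register set is available. */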
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
        log_sample(cpu_buf, pc, is_kernel, event);
}

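/* Append one frame to the backtrace begun by oprofile_begin_trace(). */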
void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];

        if (!cpu_buf->tracing)
                return;

        if (nr_available_slots(cpu_buf) < 1) {
                cpu_buf->tracing = 0;
                cpu_buf->sample_lost_overflow++;
                return;
        }

        /* a broken frame can give an eip with the same value as an
         * escape code, abort the trace if we get it */
        if (pc == ESCAPE_CODE) {
                cpu_buf->tracing = 0;
                cpu_buf->backtrace_aborted++;
                return;
        }

        add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(void * data)
{
        struct oprofile_cpu_buffer * b = data;
        if (b->cpu != smp_processor_id()) {
                printk("WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}