/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);


/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
        unsigned long flags;
        struct task_struct * task = data;
        spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
        sync_buffer(raw_smp_processor_id());
        return 0;
}

/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact; it's only a
 * quality-of-implementation issue.
 */
static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
{
        unsigned long addr = (unsigned long)data;
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * mpnt;

        down_read(&mm->mmap_sem);

        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
                up_read(&mm->mmap_sem);
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
                sync_buffer(raw_smp_processor_id());
                return 0;
        }

        up_read(&mm->mmap_sem);
        return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
{
#ifdef CONFIG_MODULES
        if (val != MODULE_STATE_COMING)
                return 0;

        /* FIXME: should we process all CPU buffers ? */
        mutex_lock(&buffer_mutex);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(MODULE_LOADED_CODE);
        mutex_unlock(&buffer_mutex);
#endif
        return 0;
}

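/* Notifier blocks hooking the callbacks above into the task-handoff,
 * task-exit, munmap and module-load notification chains; they are
 * registered in sync_start() and unregistered in sync_stop().
 */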
static struct notifier_block task_free_nb = {
        .notifier_call  = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call  = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call  = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};

static void end_sync(void)
{
        end_cpu_work();
        /* make sure we don't leak task structs: the first pass frees
         * everything already on dead_tasks and moves dying_tasks onto
         * it; the second pass then frees those as well */
        process_task_mortuary();
        process_task_mortuary();
}


int sync_start(void)
{
        int err;

        start_cpu_work();

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

out:
        return err;
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
out1:
        end_sync();
        goto out;
}


void sync_stop(void)
{
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct dentry * dentry,
        struct vfsmount * vfsmnt)
{
        unsigned long cookie;

        if (dentry->d_cookie)
                return (unsigned long)dentry;
        get_dcookie(dentry, vfsmnt, &cookie);
        return cookie;
}

/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary, but it allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct * vma;

        if (!mm)
                goto out;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                cookie = fast_get_dcookie(vma->vm_file->f_dentry,
                        vma->vm_file->f_vfsmnt);
                break;
        }

out:
        return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct * vma;

        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

                if (vma->vm_file) {
                        cookie = fast_get_dcookie(vma->vm_file->f_dentry,
                                vma->vm_file->f_vfsmnt);
                        *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
                                vma->vm_start;
                } else {
                        /* must be an anonymous map */
                        *offset = addr;
                }

                break;
        }

        if (!vma)
                cookie = INVALID_COOKIE;

        return cookie;
}


static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
        last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}


static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}


static void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}

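/* Emit a single user-space sample: resolve its EIP to a
 * dcookie/offset pair, preceded by a cookie-switch note whenever
 * the image differs from the last one emitted. Returns 0 if the
 * EIP had no mapping.
 */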
static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
        unsigned long cookie;
        off_t offset;

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (cookie == INVALID_COOKIE) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
{
        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        } else if (mm) {
                return add_us_sample(mm, s);
        } else {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
        }
        return 0;
}

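/* take_tasks_mm() below takes a reference on the task's mm via
 * get_task_mm() and read-locks its mmap_sem; release_mm() undoes
 * both, and is safe to call with a NULL mm.
 */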
static void release_mm(struct mm_struct * mm)
{
        if (!mm)
                return;
        up_read(&mm->mmap_sem);
        mmput(mm);
}


static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
        struct mm_struct * mm = get_task_mm(task);
        if (mm)
                down_read(&mm->mmap_sem);
        return mm;
}


static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}


/* "acquire" as many cpu buffer slots as we can */
static unsigned long get_slots(struct oprofile_cpu_buffer * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        /*
         * Subtle. This resets the persistent last_task
         * and in_kernel values used for switching notes.
         * BUT, there is a small window between reading
         * head_pos and this call, which means samples
         * can appear at the new head position without
         * being prefixed with the notes for switching
         * kernel mode or a task switch. This small hole
         * can lead to mis-attributed samples, or samples
         * at the start of an event buffer for which we
         * don't know whether they are kernel or user mode.
         */
        cpu_buffer_reset(b);

        if (head >= tail)
                return head - tail;

        return head + (b->buffer_size - tail);
}

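/* Advance the tail by one slot, wrapping at the end of the ring.
 * The rmb() here appears intended to ensure the slot's contents
 * have been read before tail_pos is updated and the slot becomes
 * reusable by the writer.
 */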
static void increment_tail(struct oprofile_cpu_buffer * b)
{
        unsigned long new_tail = b->tail_pos + 1;

        rmb();

        if (new_tail < b->buffer_size)
                b->tail_pos = new_tail;
        else
                b->tail_pos = 0;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and a task must already have gone through one full sync
 * to have reached dead_tasks.
 */
static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct * task;
        struct task_struct * ttask;

        spin_lock_irqsave(&task_mortuary, flags);

        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}

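/* Record that this CPU's buffer has been synced; once every online
 * CPU has been marked, it is safe to run one mortuary pass, and the
 * mark set is cleared for the next round.
 */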
static void mark_done(int cpu)
{
        int i;

        cpu_set(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpu_isset(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * so we can process the mortuary once.
         */
        process_task_mortuary();

        cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer().
 */
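/* State machine for sync_buffer(): samples are only emitted once
 * enough context has been seen (state >= sb_bt_start), and
 * sb_bt_ignore suppresses the rest of a backtrace whose first
 * entry could not be mapped.
 */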
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * a lookup in task->mm->mmap to convert the EIP into a
 * dcookie/offset value.
 */
void sync_buffer(int cpu)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
        struct mm_struct *mm = NULL;
        struct task_struct * new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        unsigned int i;
        sync_buffer_state state = sb_buffer_start;
        unsigned long available;

        mutex_lock(&buffer_mutex);

        add_cpu_switch(cpu);

        /* Remember, only we can modify tail_pos */

        available = get_slots(cpu_buf);

        for (i = 0; i < available; ++i) {
                struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];

                if (is_code(s->eip)) {
                        if (s->event <= CPU_IS_KERNEL) {
                                /* kernel/userspace switch */
                                in_kernel = s->event;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(s->event);
                        } else if (s->event == CPU_TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        } else {
                                struct mm_struct * oldmm = mm;

                                /* userspace context switch */
                                new = (struct task_struct *)s->event;

                                release_mm(oldmm);
                                mm = take_tasks_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
                } else {
                        if (state >= sb_bt_start &&
                            !add_sample(mm, s, in_kernel)) {
                                if (state == sb_bt_start) {
                                        state = sb_bt_ignore;
                                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                                }
                        }
                }

                increment_tail(cpu_buf);
        }
        release_mm(mm);

        mark_done(cpu);

        mutex_unlock(&buffer_mutex);
}