/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}

/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}

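/* Notifier blocks wiring the hooks above into the task handoff,
 * task exit, munmap and module load notification chains; they are
 * registered in sync_start() and removed again in sync_stop().
 */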
static struct notifier_block task_free_nb = {
	.notifier_call	= task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call	= task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call	= munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call	= module_load_notify,
};

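/* Tear down the per-CPU work and drain the task mortuary twice so that
 * every task struct queued while profiling was active is actually freed.
 */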
static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

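/* Set up everything the sync code needs: the cpumask of synced CPUs,
 * the per-CPU work, and the notifiers above. Returns 0 on success or
 * a negative error code, unwinding any partial registration.
 */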
int sync_start(void)
{
	int err;

	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;
	cpumask_clear(marked_cpus);

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	free_cpumask_var(marked_cpus);
	goto out;
}

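/* Undo sync_start(): unregister the notifiers in reverse order, tear
 * down the per-CPU work via end_sync() and free the CPU mask.
 */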
void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
	free_cpumask_var(marked_cpus);
}

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}

/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(&vma->vm_file->f_path);
		break;
	}

out:
	return cookie;
}

/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}

static unsigned long last_cookie = INVALID_COOKIE;

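/* Note in the event buffer that the following samples come from a
 * different CPU; the cached cookie is invalidated because last_cookie
 * only tracks the stream most recently written to the event buffer.
 */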
static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

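/* Record a transition between kernel and user mode so the following
 * samples can be attributed to the right side of the boundary.
 */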
static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

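/* Record a switch to a new task: its pid, the cookie of its main
 * executable, and (as a separate escape for daemon back-compat) the tgid.
 */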
static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}

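/* Announce that subsequent sample offsets are relative to the binary
 * identified by this cookie.
 */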
static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}

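/* Mark the start of a backtrace; the entries that follow are call-chain
 * samples rather than ordinary hits.
 */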
static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

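/* Copy an escaped, variable-length record (for example the extended
 * sample data some architectures provide) from the CPU buffer entry
 * into the event buffer, converting the PC into a cookie/offset pair
 * when an mm is available.
 */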
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}

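/* Emit one (offset, event) pair into the event buffer. */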
static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}

/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}

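/* Drop the mmap_sem taken in take_tasks_mm() and put the mm reference. */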
static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}

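/* Pin the task's mm (if it still has one) and take its mmap_sem for
 * reading so the vma list stays stable while samples are converted.
 */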
static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}

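/* An eip of ESCAPE_CODE marks a synthetic record rather than a real sample. */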
static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}

/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}

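/* Record that this CPU's buffer has been synced. Once every online CPU
 * has been processed at least once it is safe to advance the task
 * mortuary, and the mask is cleared for the next round.
 */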
static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}

/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer().
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* This function can be used to add a buffer's worth of data directly to
 * the kernel event buffer. The buffer is assumed to be circular: entries
 * are taken from index start up to (but not including) index stop,
 * wrapping at index max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}