/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	struct kref		kref;
	int			cpu;
	pid_t			pid;
/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
/*
 * Doesn't need to be atomic, work items are serialized in a single workqueue
 * thread on a single CPU.
 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/*
 * Don't need a global lock because these are allocated before the
 * workqueues, and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu)	(&per_cpu(all_workqueue_stat, cpu))

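/* kref release callback: free the stat entry once its last reference drops */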
static void cpu_workqueue_stat_free(struct kref *kref)
{
	kfree(container_of(kref, struct cpu_workqueue_stats, kref));
}

/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	kref_init(&cws->kref);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread only executes on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kref_put(&node->kref, cpu_workqueue_stat_free);
			goto found;
		}
	}

	pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

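/*
 * Take a reference on the first stat entry of @cpu's list, or return NULL
 * if no workqueue thread has been traced on that cpu.
 */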
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

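/*
 * Start of a trace_stat iteration: return the first stat entry found while
 * walking the possible cpus, or NULL if no workqueue thread is traced.
 */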
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	return NULL;
}

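/*
 * Advance the iteration: take a reference on the next entry of the current
 * cpu's list, or move on to the first entry of the next possible cpu.
 */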
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	struct cpu_workqueue_stats *ret;
	int cpu = prev_cws->cpu;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	} else {
		ret = list_entry(prev_cws->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

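/*
 * Print one stat entry: cpu, insertion count, execution count and the
 * workqueue thread's comm, provided the thread still exists.
 */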
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}

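/* Drop the reference taken by stat_start()/stat_next() on this entry */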
static void workqueue_stat_release(void *stat)
{
	struct cpu_workqueue_stats *node = stat;

	kref_put(&node->kref, cpu_workqueue_stat_free);
}

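/* Column headers for the trace_stat/workqueues output */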
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}

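/* Hooks handed to the stat tracer core by register_stat_tracer() */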
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_release = workqueue_stat_release,
	.stat_headers = workqueue_stat_headers
};

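/* Register the "workqueues" entry with the stat tracing infrastructure */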
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);

/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);