#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/backing-dev.h>
#include <linux/device.h>
#include <linux/writeback.h>

#define show_inode_state(state) \
	__print_flags(state, "|", \
		{I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
		{I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
		{I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
		{I_NEW, "I_NEW"}, \
		{I_WILL_FREE, "I_WILL_FREE"}, \
		{I_FREEING, "I_FREEING"}, \
		{I_CLEAR, "I_CLEAR"}, \
		{I_SYNC, "I_SYNC"}, \
		{I_REFERENCED, "I_REFERENCED"} \
	)

struct wb_writeback_work;

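/*
 * Tracks a wb_writeback_work item on a bdi: the events defined from this
 * class fire as the work is queued, picked up by the flusher, and completed.
 */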
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
	TP_ARGS(bdi, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  wb_reason_name[__entry->reason]
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

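/* Total number of pages a flusher thread wrote back in one work pass. */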
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

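/*
 * Events that carry only the bdi name: wakeups, bdi (un)registration and
 * flusher thread start/stop.
 */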
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		  __entry->name
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);

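/*
 * Snapshot of the writeback_control handed to the writepage(s) paths:
 * how much remains to be written, what was skipped, and the mode flags.
 */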
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
	),

	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		  "bgrd=%d reclm=%d cyclic=%d "
		  "start=0x%lx end=0x%lx",
		  __entry->name,
		  __entry->nr_to_write,
		  __entry->pages_skipped,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->for_background,
		  __entry->for_reclaim,
		  __entry->range_cyclic,
		  __entry->range_start,
		  __entry->range_end)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

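/*
 * Expired dirty inodes being queued for IO: "older" is the dirtied-before
 * cutoff in jiffies, "age" the same cutoff in ms relative to now, and
 * "moved" the number of inodes put on the IO list.
 */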
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older = older_than_this ? *older_than_this : 0;
		__entry->age = older_than_this ?
			(jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved = moved;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
		  __entry->name,
		  __entry->older,	/* older_than_this in jiffies */
		  __entry->age,	/* older_than_this in relative milliseconds */
		  __entry->moved,
		  wb_reason_name[__entry->reason])
);

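/*
 * Global dirty page accounting at one point in time, together with the
 * background and hard dirty thresholds it is balanced against.
 */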
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_page_state(NR_DIRTIED);
		__entry->nr_written = global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

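/* Convert a page count (or pages/s rate) into KB (or KB/s). */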
#define KBps(x)	((x) << (PAGE_SHIFT - 10))

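/*
 * Dirty throttling bandwidth estimation for one bdi: measured and averaged
 * write bandwidth, the recent dirtying rate, and the resulting base and
 * position-corrected task ratelimits (all in KB/s).
 */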
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw = KBps(bdi->write_bandwidth);
		__entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);

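/*
 * One balance_dirty_pages() throttling decision: global and per-bdi dirty
 * counts versus their setpoints and limit, the ratelimit applied to the
 * dirtying task, and how long it was paused.
 */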
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit = global_dirty_limit;
		__entry->setpoint = (global_dirty_limit + freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
	),

	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause	/* ms */
	)
);

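/*
 * Time spent waiting on a congested bdi: the timeout that was requested
 * and the delay actually incurred, both in microseconds.
 */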
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
		  __entry->usec_timeout,
		  __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

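/*
 * Writeback of a single inode: its dirty state, how long ago it was
 * dirtied, and how many of the requested pages were written this pass.
 */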
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>