#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

/* cft->private [un]packing for stat printing */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
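
/*
 * For example, a stat cftype can pack its policy id and the stat's
 * offset into ->private (sketch; the field chosen is illustrative):
 *
 *	.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
 *			offsetof(struct blkio_group_stats, time)),
 *
 * and the print handler recovers both halves with
 * BLKCG_STAT_POL(cft->private) and BLKCG_STAT_OFF(cft->private).
 */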

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkg_stat {
	struct u64_stats_sync syncp;
	uint64_t cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync syncp;
	uint64_t cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* total sectors transferred */
	struct blkg_stat sectors;
};

struct blkio_group_conf {
	unsigned int weight;
	u64 iops[2];
	u64 bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* reference count */
	int refcnt;

	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head alloc_node;
	struct rcu_head rcu_head;
};

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);

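/*
 * A policy implementation fills in a blkio_policy_type and registers
 * it; a minimal sketch, with all foo_* names hypothetical:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_init_group_fn = foo_init_group,
 *			.blkio_update_group_weight_fn = foo_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *		.pdata_size = sizeof(struct foo_group),
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 */
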
void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf);
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf);
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf);
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf);

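/*
 * A prfill callback formats one blkg's value.  A minimal sketch for a
 * policy printing a blkg_stat embedded in blkio_group_stats at offset
 * @off (function name hypothetical):
 *
 *	static u64 my_prfill_stat(struct seq_file *sf,
 *				  struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *			blkg_stat_read((void *)&pd->stats + off));
 *	}
 */
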
struct blkg_conf_ctx {
	struct gendisk *disk;
	struct blkio_group *blkg;
	u64 v;
};

int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
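
/*
 * A configuration write handler brackets its update with
 * blkg_conf_prep()/blkg_conf_finish(), e.g. (sketch):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, input, &ctx);
 *	if (ret)
 *		return ret;
 *	... apply ctx.v to ctx.blkg ...
 *	blkg_conf_finish(&ctx);
 */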

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
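
/*
 * The two helpers above are inverses: for any blkg with policy data
 * attached, pdata_to_blkg(blkg_to_pdata(blkg, pol)) == blkg.
 */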

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
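
/*
 * Example (sketch): grab a reference so the blkg survives a queue_lock
 * drop, then release it once the lock is re-acquired.
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *	... blkg can't be released here ...
 *	spin_lock_irq(q->queue_lock);
 *	blkg_put(blkg);
 *	spin_unlock_irq(q->queue_lock);
 */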

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls
 * to this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
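
/*
 * Writers serialize among themselves (typically via the queue lock)
 * while readers need no locking, e.g. (sketch):
 *
 *	blkg_stat_add(&stats->time, used_ns);	... writer side
 *	v = blkg_stat_read(&stats->time);	... lockless reader side
 *
 * On 32bit SMP the syncp seqcount is what keeps the reader from
 * observing a torn 64bit value.
 */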

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
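
/*
 * For example, accounting one merged IO for a sync write (sketch;
 * @stats is a blkio_group_stats):
 *
 *	blkg_rwstat_add(&stats->merged, REQ_WRITE | REQ_SYNC, 1);
 *
 * which bumps both the WRITE and the SYNC counters.
 */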

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
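
/*
 * For instance, BLKG_FLAG_FNS(waiting) above expands to
 * blkio_mark_blkg_waiting(), blkio_clear_blkg_waiting() and
 * blkio_blkg_waiting(), all operating on the BLKG_waiting bit of
 * stats->flags.
 */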
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif
#endif /* _BLK_CGROUP_H */