#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

/* cft->private [un]packing for stat printing */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
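
/*
 * Example (a sketch): a stat cftype packs its policy id and the byte
 * offset of the field it prints into ->private, and the print handler
 * unpacks them:
 *
 *	.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
 *				   offsetof(struct blkio_group_stats, queued)),
 *	...
 *	int pol = BLKCG_STAT_POL(cft->private);
 *	int off = BLKCG_STAT_OFF(cft->private);
 */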

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkg_stat {
	struct u64_stats_sync syncp;
	uint64_t cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync syncp;
	uint64_t cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* total sectors transferred */
	struct blkg_stat sectors;
};

struct blkio_group_conf {
	unsigned int weight;
	u64 iops[2];
	u64 bps[2];
};
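
/*
 * iops[] and bps[] are indexed by direction.  A sketch, assuming the
 * kernel's READ == 0 / WRITE == 1 convention used by the throttle policy:
 *
 *	conf->bps[READ] = 1024 * 1024;		read limit: 1 MiB/s
 *	conf->iops[WRITE] = 1000;		write limit: 1000 iops
 */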

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* reference count */
	int refcnt;

	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head alloc_node;
	struct rcu_head rcu_head;
};

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
	struct cftype *cftypes;		/* cgroup files for the policy */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);

void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf);
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf);
int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
			 struct seq_file *sf);
int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf);

struct blkg_conf_ctx {
	struct gendisk *disk;
	struct blkio_group *blkg;
	u64 v;
};

int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

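/*
 * Typical usage from a policy's cgroup-file write handler (a sketch;
 * error handling trimmed):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret = blkg_conf_prep(blkcg, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	... apply ctx.v to ctx.blkg, the blkg for the device named in buf ...
 *	blkg_conf_finish(&ctx);
 */
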
/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
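
/*
 * The two helpers above are inverses.  A sketch, assuming a hypothetical
 * policy-private "struct my_pol_data" whose size was registered via
 * ->pdata_size:
 *
 *	struct my_pol_data *mpd = blkg_to_pdata(blkg, &my_policy);
 *	WARN_ON(pdata_to_blkg(mpd) != blkg);
 */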

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
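
/*
 * Reference pattern (a sketch; both calls require queue_lock):
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);			extra ref on top of an existing one
 *	...
 *	blkg_put(blkg);			last put ends up in __blkg_release()
 *	spin_unlock_irq(q->queue_lock);
 */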

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
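
/*
 * Writer/reader split (a sketch): updates must be serialized by the
 * caller, reads need no locking thanks to the u64_stats seqcount:
 *
 *	blkg_stat_add(&stats->time, used_ns);	writer, under caller's lock
 *	v = blkg_stat_read(&stats->time);	reader, lockless
 */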

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
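
/*
 * Example (a sketch): account a 4k sync write and read the totals back:
 *
 *	blkg_rwstat_add(&stats->service_bytes, REQ_WRITE | REQ_SYNC, 4096);
 *	snap = blkg_rwstat_read(&stats->service_bytes);
 *	total = blkg_rwstat_sum(&stats->service_bytes);	  READ + WRITE only
 */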

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
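
/*
 * The expansions above generate blkio_mark_blkg_<flag>(),
 * blkio_clear_blkg_<flag>() and blkio_blkg_<flag>() for each of
 * waiting/idling/empty, e.g. (a sketch):
 *
 *	blkio_mark_blkg_empty(stats);
 *	if (blkio_blkg_empty(stats))
 *		blkio_clear_blkg_empty(stats);
 */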
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
		struct blkio_policy_type *pol, unsigned long time,
		unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, uint64_t bytes,
		bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, uint64_t start_time,
		uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, bool direction,
		bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol,
		struct blkio_group *curr_blkg, bool direction,
		bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, bool direction,
		bool sync) { }
#endif
#endif /* _BLK_CGROUP_H */