#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

/* cft->private [un]packing for stat printing */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
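
/*
 * Illustrative sketch (not part of the original header): a stat cftype
 * can pack its policy id and the stat's offset into cft->private and
 * unpack them again in the read handler. The offset must fit in 16 bits,
 * which holds for the stat structures below:
 *
 *	cft->private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
 *			offsetof(struct blkio_group_stats, queued));
 *	...
 *	enum blkio_policy_id pol = BLKCG_STAT_POL(cft->private);
 *	unsigned int off = BLKCG_STAT_OFF(cft->private);
 */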

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkg_stat {
	struct u64_stats_sync syncp;
	uint64_t cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync syncp;
	uint64_t cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* total sectors transferred */
	struct blkg_stat sectors;
};

struct blkio_group_conf {
	unsigned int weight;
	unsigned int iops[2];
	u64 bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* reference count */
	int refcnt;

	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head alloc_node;
	struct rcu_head rcu_head;
};

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);
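
/*
 * Illustrative sketch (not part of the original header): a policy
 * registers itself by filling in a blkio_policy_type; every name
 * prefixed with "example_" below is hypothetical:
 *
 *	static struct blkio_policy_type example_policy = {
 *		.ops = {
 *			.blkio_init_group_fn = example_init_group,
 *			.blkio_update_group_weight_fn = example_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *		.pdata_size = sizeof(struct example_group),
 *	};
 *
 * blkio_policy_register(&example_policy) is then called at init time and
 * blkio_policy_unregister(&example_policy) on exit.
 */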

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
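
/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above are inverses. A policy keeps its state in the pdata area of
 * blkg->pd[plid] and can recover the owning blkg from that pointer;
 * "example_group" and "example_policy" are hypothetical:
 *
 *	struct example_group *eg = blkg_to_pdata(blkg, &example_policy);
 *	WARN_ON(pdata_to_blkg(eg) != blkg);
 */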

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
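
/*
 * Illustrative sketch (not part of the original header): holding a blkg
 * across a queue_lock drop requires taking an explicit reference first:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	... use blkg without queue_lock ...
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_put(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *
 * Dropping the last reference via blkg_put() invokes __blkg_release().
 */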

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
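
/*
 * Illustrative sketch (not part of the original header): u64_stats_sync
 * makes the 64-bit counter safe to read locklessly even on 32-bit SMP;
 * the reader simply retries if it raced with a writer. Writers still
 * serialize among themselves, typically under queue_lock:
 *
 *	blkg_stat_add(&pd->stats.time, used_ns);	writer side
 *	total_ns = blkg_stat_read(&pd->stats.time);	reader side
 */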

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
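
/*
 * Illustrative sketch (not part of the original header): accounting a
 * completed synchronous 4KB write bumps both the WRITE and SYNC counters
 * with a single call:
 *
 *	blkg_rwstat_add(&stats_cpu->service_bytes, REQ_WRITE | REQ_SYNC, 4096);
 */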

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
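
/*
 * Illustrative sketch (not part of the original header): one consistent
 * snapshot serves all four directions:
 *
 *	struct blkg_rwstat tmp = blkg_rwstat_read(&pd->stats.queued);
 *	uint64_t nr_reads = tmp.cnt[BLKG_RWSTAT_READ];
 *	uint64_t nr_total = blkg_rwstat_sum(&pd->stats.queued);
 *
 * Note that blkg_rwstat_sum() adds only READ + WRITE; the SYNC and ASYNC
 * counters overlap those and would double-count the total.
 */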

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
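
/*
 * Illustrative expansion (not part of the original header): for the
 * "waiting" flag, BLKG_FLAG_FNS(waiting) generates three helpers:
 *
 *	blkio_mark_blkg_waiting(stats)		sets BLKG_waiting in stats->flags
 *	blkio_clear_blkg_waiting(stats)		clears it
 *	blkio_blkg_waiting(stats)		tests it, returning 0 or 1
 *
 * and likewise for "idling" and "empty".
 */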
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
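
/*
 * Illustrative sketch (not part of the original header): looking up (or
 * creating) the blkg for a bio's blkcg follows the usual locking rules,
 * rcu_read_lock() for the cgroup lookup and queue_lock for creation:
 *
 *	rcu_read_lock();
 *	blkcg = bio_blkio_cgroup(bio);
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 *
 * blkg_lookup_create() may return an ERR_PTR() value, which callers are
 * expected to check.
 */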
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
			struct request_queue *q) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif
#endif	/* _BLK_CGROUP_H */