#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

/* cft->private [un]packing for stat printing */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
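
/*
 * Usage sketch (hypothetical cftype initializer, for illustration only):
 * a stat file encodes the owning policy and the stat field offset into
 * cft->private, e.g.
 *
 *	.private = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL,
 *				   offsetof(struct blkio_group_stats_cpu,
 *					    service_bytes)),
 *
 * and the read handler recovers both with BLKCG_STAT_POL(cft->private)
 * and BLKCG_STAT_OFF(cft->private). The offset must fit in 16 bits.
 */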

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight_device,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkg_stat {
	struct u64_stats_sync syncp;
	uint64_t cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync syncp;
	uint64_t cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* total sectors transferred */
	struct blkg_stat sectors;
};

struct blkio_group_conf {
	unsigned int weight;
	u64 iops[2];
	u64 bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};
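
/*
 * Layout sketch: each registered policy gets one blkg_policy_data per
 * blkg, with the policy's own private struct (a hypothetical
 * "struct foo_grp" here, purely for illustration) laid out in-line at
 * @pdata:
 *
 *	blkg->pd[plid] ->  +--------------------+
 *			   | blkg, conf, stats  |
 *			   +--------------------+
 *			   | struct foo_grp     |   <- pdata,
 *			   +--------------------+      pol->pdata_size bytes
 *
 * blkg_to_pdata() and pdata_to_blkg() below convert between the two.
 */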
151
Vivek Goyal31e4c282009-12-03 12:59:42 -0500152struct blkio_group {
Tejun Heoc875f4d2012-03-05 13:15:22 -0800153 /* Pointer to the associated request_queue */
154 struct request_queue *q;
Tejun Heoe8989fa2012-03-05 13:15:20 -0800155 struct list_head q_node;
Vivek Goyal31e4c282009-12-03 12:59:42 -0500156 struct hlist_node blkcg_node;
Tejun Heo7ee9c562012-03-05 13:15:11 -0800157 struct blkio_cgroup *blkcg;
Vivek Goyal2868ef72009-12-03 12:59:48 -0500158 /* Store cgroup path */
159 char path[128];
Tejun Heo1adaf3d2012-03-05 13:15:15 -0800160 /* reference count */
161 int refcnt;
Vivek Goyal22084192009-12-03 12:59:49 -0500162
Tejun Heo549d3aa2012-03-05 13:15:16 -0800163 struct blkg_policy_data *pd[BLKIO_NR_POLICIES];
Tejun Heo1adaf3d2012-03-05 13:15:15 -0800164
Vivek Goyal1cd9e032012-03-08 10:53:56 -0800165 /* List of blkg waiting for per cpu stats memory to be allocated */
166 struct list_head alloc_node;
Tejun Heo1adaf3d2012-03-05 13:15:15 -0800167 struct rcu_head rcu_head;
Vivek Goyal31e4c282009-12-03 12:59:42 -0500168};
169
Tejun Heo03814112012-03-05 13:15:14 -0800170typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
Tejun Heoca32aef2012-03-05 13:15:03 -0800171typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
Vivek Goyalfe071432010-10-01 14:49:49 +0200172 struct blkio_group *blkg, unsigned int weight);
Tejun Heoca32aef2012-03-05 13:15:03 -0800173typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
Vivek Goyalfe071432010-10-01 14:49:49 +0200174 struct blkio_group *blkg, u64 read_bps);
Tejun Heoca32aef2012-03-05 13:15:03 -0800175typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
Vivek Goyalfe071432010-10-01 14:49:49 +0200176 struct blkio_group *blkg, u64 write_bps);
Tejun Heoca32aef2012-03-05 13:15:03 -0800177typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
Vivek Goyalfe071432010-10-01 14:49:49 +0200178 struct blkio_group *blkg, unsigned int read_iops);
Tejun Heoca32aef2012-03-05 13:15:03 -0800179typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
Vivek Goyalfe071432010-10-01 14:49:49 +0200180 struct blkio_group *blkg, unsigned int write_iops);
Vivek Goyal3e252062009-12-04 10:36:42 -0500181
182struct blkio_policy_ops {
Tejun Heo03814112012-03-05 13:15:14 -0800183 blkio_init_group_fn *blkio_init_group_fn;
Vivek Goyal3e252062009-12-04 10:36:42 -0500184 blkio_update_group_weight_fn *blkio_update_group_weight_fn;
Vivek Goyal4c9eefa2010-09-15 17:06:34 -0400185 blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
186 blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
Vivek Goyal7702e8f2010-09-15 17:06:36 -0400187 blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
188 blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
Vivek Goyal3e252062009-12-04 10:36:42 -0500189};
190
191struct blkio_policy_type {
192 struct list_head list;
193 struct blkio_policy_ops ops;
Vivek Goyal062a6442010-09-15 17:06:33 -0400194 enum blkio_policy_id plid;
Tejun Heo03814112012-03-05 13:15:14 -0800195 size_t pdata_size; /* policy specific private data size */
Vivek Goyal3e252062009-12-04 10:36:42 -0500196};
197
Tejun Heo5efd6112012-03-05 13:15:12 -0800198extern int blkcg_init_queue(struct request_queue *q);
199extern void blkcg_drain_queue(struct request_queue *q);
200extern void blkcg_exit_queue(struct request_queue *q);
201
Vivek Goyal3e252062009-12-04 10:36:42 -0500202/* Blkio controller policy registration */
203extern void blkio_policy_register(struct blkio_policy_type *);
204extern void blkio_policy_unregister(struct blkio_policy_type *);
Tejun Heoe8989fa2012-03-05 13:15:20 -0800205extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
206extern void update_root_blkg_pd(struct request_queue *q,
207 enum blkio_policy_id plid);
Vivek Goyal3e252062009-12-04 10:36:42 -0500208
Tejun Heo03814112012-03-05 13:15:14 -0800209/**
210 * blkg_to_pdata - get policy private data
211 * @blkg: blkg of interest
212 * @pol: policy of interest
213 *
214 * Return pointer to private data associated with the @blkg-@pol pair.
215 */
216static inline void *blkg_to_pdata(struct blkio_group *blkg,
217 struct blkio_policy_type *pol)
218{
Tejun Heo549d3aa2012-03-05 13:15:16 -0800219 return blkg ? blkg->pd[pol->plid]->pdata : NULL;
Tejun Heo03814112012-03-05 13:15:14 -0800220}
221
222/**
223 * pdata_to_blkg - get blkg associated with policy private data
224 * @pdata: policy private data of interest
Tejun Heo03814112012-03-05 13:15:14 -0800225 *
Tejun Heoaaec55a2012-04-01 14:38:42 -0700226 * @pdata is policy private data. Determine the blkg it's associated with.
Tejun Heo03814112012-03-05 13:15:14 -0800227 */
Tejun Heoaaec55a2012-04-01 14:38:42 -0700228static inline struct blkio_group *pdata_to_blkg(void *pdata)
Tejun Heo03814112012-03-05 13:15:14 -0800229{
230 if (pdata) {
231 struct blkg_policy_data *pd =
232 container_of(pdata, struct blkg_policy_data, pdata);
233 return pd->blkg;
234 }
235 return NULL;
236}
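
/*
 * Conversion sketch (hypothetical policy "foo", names for illustration
 * only): a policy round-trips between its private data and the blkg:
 *
 *	struct foo_grp *fg = blkg_to_pdata(blkg, &blkio_policy_foo);
 *	...
 *	struct blkio_group *blkg = pdata_to_blkg(fg);
 *
 * Both directions are NULL-safe.
 */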

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
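
/*
 * Reference sketch (illustrative only): both helpers must be called under
 * queue_lock, so a typical caller pins a blkg across an unlocked section
 * like this:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *	...
 *	spin_lock_irq(q->queue_lock);
 *	blkg_put(blkg);		(last put invokes __blkg_release())
 *	spin_unlock_irq(q->queue_lock);
 */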

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat. This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
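
/*
 * Update/read sketch (illustrative): the writer serializes itself, e.g.
 * under queue_lock, while readers need no locking:
 *
 *	blkg_stat_add(&stats->time, used);	(writer, serialized)
 *	v = blkg_stat_read(&stats->time);	(any context)
 *
 * On 64-bit the seqcount inside u64_stats_sync compiles away entirely.
 */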

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @rw. The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
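
/*
 * Accounting sketch (illustrative): a synchronous write of @bytes bumps
 * both the WRITE and the SYNC counters in one update:
 *
 *	blkg_rwstat_add(&stats_cpu->service_bytes,
 *			REQ_WRITE | REQ_SYNC, bytes);
 *
 * A plain read (rw == 0) lands in READ and ASYNC instead.
 */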

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction. This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
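
/*
 * Snapshot sketch (illustrative): blkg_rwstat_read() returns the whole
 * struct by value, so the four counters are mutually consistent:
 *
 *	struct blkg_rwstat tmp = blkg_rwstat_read(&stats->queued);
 *	uint64_t reads  = tmp.cnt[BLKG_RWSTAT_READ];
 *	uint64_t writes = tmp.cnt[BLKG_RWSTAT_WRITE];
 *
 * blkg_rwstat_sum() is just reads + writes from such a snapshot.
 */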

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
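
/*
 * Expansion sketch: each BLKG_FLAG_FNS(name) invocation above generates
 * three helpers operating on stats->flags, e.g. for "waiting":
 *
 *	blkio_mark_blkg_waiting(stats);
 *	blkio_clear_blkg_waiting(stats);
 *	if (blkio_blkg_waiting(stats)) ...
 *
 * and likewise for "idling" and "empty".
 */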
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
		struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q)
					      { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
#endif
#endif /* _BLK_CGROUP_H */