#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

/* cft->private [un]packing for stat printing */
#define BLKCG_STAT_PRIV(pol, off)	(((unsigned)(pol) << 16) | (off))
#define BLKCG_STAT_POL(prv)		((unsigned)(prv) >> 16)
#define BLKCG_STAT_OFF(prv)		((unsigned)(prv) & 0xffff)
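/*
 * Illustrative round trip (a sketch, not an in-tree snippet): the policy
 * ID lives in the high 16 bits and the stat offset in the low 16, so
 *
 *	priv = BLKCG_STAT_PRIV(BLKIO_POLICY_THROTL, off);
 *	BLKCG_STAT_POL(priv) == BLKIO_POLICY_THROTL
 *	BLKCG_STAT_OFF(priv) == off
 *
 * holds as long as @off fits in 16 bits, which is true for any member
 * offset within the stats structures below.
 */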

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t id;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

struct blkio_group_stats {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of IOs merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkio_group */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif
};

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct blkio_group_conf {
	unsigned int			weight;
	u64				iops[2];
	u64				bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group		*blkg;

	/* Configuration */
	struct blkio_group_conf		conf;

	struct blkio_group_stats	stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu	__percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};

struct blkio_group {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkio_cgroup		*blkcg;
	/* Store cgroup path */
	char				path[128];
	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKIO_NR_POLICIES];

	/* List of blkg waiting for per cpu stats memory to be allocated */
	struct list_head		alloc_node;
	struct rcu_head			rcu_head;
};

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
	struct cftype *cftypes;		/* cgroup files for the policy */
};

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
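
/*
 * Skeleton of a policy hooking itself up, loosely modeled on the in-tree
 * users; every example_* name here is hypothetical.  The init callback
 * runs once per blkg and fills in the pdata area (which is allocated
 * zeroed); blkio_policy_register() would be called from the policy's own
 * init code.
 */
struct example_group {
	unsigned int io_count;			/* policy-private state */
};

static void example_init_group(struct blkio_group *blkg)
{
	/* set any non-zero defaults in blkg->pd[plid]->pdata here */
}

static struct blkio_policy_type example_policy = {
	.ops = {
		.blkio_init_group_fn	= example_init_group,
	},
	.plid		= BLKIO_POLICY_PROP,	/* the policy's own ID */
	.pdata_size	= sizeof(struct example_group),
};
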
extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
extern void update_root_blkg_pd(struct request_queue *q,
				enum blkio_policy_id plid);

void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
		       u64 (*prfill)(struct seq_file *, struct blkg_policy_data *, int),
		       int pol, int data, bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf);
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf);
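
/*
 * Minimal sketch of wiring the printing helpers into a policy's file
 * table: pack the policy ID and the stat's offset into cft->private and
 * let blkcg_print_rwstat() iterate the blkgs.  The file name is made up
 * for illustration.
 */
static struct cftype example_files[] = {
	{
		.name = "example.io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
				offsetof(struct blkio_group_stats, serviced)),
		.read_seq_string = blkcg_print_rwstat,
	},
	{ }	/* terminate */
};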

struct blkg_conf_ctx {
	struct gendisk		*disk;
	struct blkio_group	*blkg;
	u64			v;
};

int blkg_conf_prep(struct blkio_cgroup *blkcg, const char *input,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
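
/*
 * Typical shape of a configuration writer, as a hedged sketch (the helper
 * name and the field it sets are illustrative): blkg_conf_prep() parses a
 * "MAJ:MIN VAL" string, resolves and pins the matching blkg, and
 * blkg_conf_finish() undoes the pinning once the update is done.
 */
static inline int example_set_read_bps(struct blkio_cgroup *blkcg,
				       const char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, input, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is the group for the named device, ctx.v the value */
	ctx.blkg->pd[BLKIO_POLICY_THROTL]->conf.bps[0] = ctx.v;

	blkg_conf_finish(&ctx);
	return 0;
}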

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 *
 * @pdata is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
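
/*
 * Example round trip, reusing the hypothetical example_policy from above:
 *
 *	struct example_group *eg = blkg_to_pdata(blkg, &example_policy);
 *	struct blkio_group *same = pdata_to_blkg(eg);
 *
 * after which same == blkg.
 */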

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
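
/*
 * Illustrative pattern (a sketch, not from an in-tree caller): a holder
 * that already owns a reference can pin @blkg across a section where the
 * queue_lock must be dropped.  Assumes the full request_queue definition
 * is visible (blkdev.h), as the inline helpers above already do.
 */
static inline void example_pinned_section(struct blkio_group *blkg)
{
	spin_lock_irq(blkg->q->queue_lock);
	blkg_get(blkg);
	spin_unlock_irq(blkg->q->queue_lock);

	/* ... safe to use blkg here without the queue_lock held ... */

	spin_lock_irq(blkg->q->queue_lock);
	blkg_put(blkg);
	spin_unlock_irq(blkg->q->queue_lock);
}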

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat. This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}
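
/*
 * Sketch of the intended split (the helper is hypothetical): updates are
 * serialized by the caller, e.g. under the queue_lock, while readers may
 * use blkg_stat_read() locklessly.
 */
static inline void example_charge_time(struct blkg_policy_data *pd,
				       uint64_t ns)
{
	/* caller serializes updates, per the blkg_stat_add() contract */
	blkg_stat_add(&pd->stats.time, ns);

	/* a reader anywhere may do: total = blkg_stat_read(&pd->stats.time); */
}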

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @rw. The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
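
/*
 * Sketch of charging one completed IO into the per-cpu stats, loosely
 * following the pattern the stat consumers use: @rw carries the request's
 * REQ_WRITE/REQ_SYNC bits.  The helper name is hypothetical, and
 * this_cpu_ptr() assumes <linux/percpu.h>.
 */
static inline void example_account_io(struct blkg_policy_data *pd,
				      int rw, uint64_t bytes)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* per-cpu memory is allocated lazily; skip accounting until then */
	if (pd->stats_cpu == NULL)
		return;

	/* disable IRQs to keep the update on one CPU and u64-safe */
	local_irq_save(flags);
	stats_cpu = this_cpu_ptr(pd->stats_cpu);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
	local_irq_restore(flags);
}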

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read and return the current snapshot of @rwstat. This function can be
 * called without synchronization and takes care of u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction. This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    bool destroy_root) { }
static inline void update_root_blkg_pd(struct request_queue *q,
				       enum blkio_policy_id plid) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
bio_blkio_cgroup(struct bio *bio) { return NULL; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q) { return NULL; }
#endif
#endif	/* _BLK_CGROUP_H */