blob: 07a29923aba21c13016f9b6b0d83f00b0d1fad60 [file] [log] [blame]
Pavel Emelianove552b662008-02-07 00:13:49 -08001/*
2 * resource cgroups
3 *
4 * Copyright 2007 OpenVZ SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 *
8 */
9
10#include <linux/types.h>
11#include <linux/parser.h>
12#include <linux/fs.h>
13#include <linux/res_counter.h>
14#include <linux/uaccess.h>
Paul Menage856c13a2008-07-25 01:47:04 -070015#include <linux/mm.h>
Pavel Emelianove552b662008-02-07 00:13:49 -080016
Balbir Singh28dbc4b2009-01-07 18:08:05 -080017void res_counter_init(struct res_counter *counter, struct res_counter *parent)
Pavel Emelianove552b662008-02-07 00:13:49 -080018{
19 spin_lock_init(&counter->lock);
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -070020 counter->limit = RESOURCE_MAX;
Balbir Singh296c81d2009-09-23 15:56:36 -070021 counter->soft_limit = RESOURCE_MAX;
Balbir Singh28dbc4b2009-01-07 18:08:05 -080022 counter->parent = parent;
Pavel Emelianove552b662008-02-07 00:13:49 -080023}
24
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020025int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
26 bool force)
Pavel Emelianove552b662008-02-07 00:13:49 -080027{
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020028 int ret = 0;
29
Pavel Emelianove552b662008-02-07 00:13:49 -080030 if (counter->usage + val > counter->limit) {
31 counter->failcnt++;
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020032 ret = -ENOMEM;
33 if (!force)
34 return ret;
Pavel Emelianove552b662008-02-07 00:13:49 -080035 }
36
37 counter->usage += val;
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020038 if (!force && counter->usage > counter->max_usage)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -070039 counter->max_usage = counter->usage;
Pavel Emelianove552b662008-02-07 00:13:49 -080040 return ret;
41}
42
/*
 * Charge @val against @counter and every ancestor up to the root.
 *
 * Walks the parent chain with interrupts disabled, taking each level's
 * spinlock only for its own charge.  On the first level that rejects
 * the charge:
 *   - !force: *@limit_fail_at records that level, the walk stops, and
 *     every level already charged is rolled back; returns -ENOMEM.
 *   - force:  the walk continues and charges all remaining levels
 *     anyway; the first failing level is still reported via
 *     *@limit_fail_at and -ENOMEM returned, but nothing is undone.
 * Returns 0 when every level accepted the charge.
 */
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
				struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		/* "!ret" keeps only the FIRST failure when forcing. */
		if (r < 0 && !ret) {
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	if (ret < 0 && !force) {
		/*
		 * Roll back the levels below the failing one; level c
		 * itself was never charged (charge_locked bails before
		 * updating usage when !force).
		 */
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}
Frederic Weisbecker4d8438f2012-04-25 01:11:35 +020076
/*
 * Hierarchically charge @val; fails (rolling back any partial charge)
 * if any level is over its limit, reporting that level through
 * *@limit_fail_at.  Returns 0 on success, -ENOMEM on failure.
 */
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, false);
}
82
/*
 * Hierarchically charge @val unconditionally: every level's usage is
 * increased even past its limit.  Still returns -ENOMEM and sets
 * *@limit_fail_at when some level went over, so the caller can react,
 * but nothing is undone.
 */
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, true);
}
88
Pavel Emelianove552b662008-02-07 00:13:49 -080089void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
90{
91 if (WARN_ON(counter->usage < val))
92 val = counter->usage;
93
94 counter->usage -= val;
95}
96
KAMEZAWA Hiroyuki4e649152009-10-01 15:44:11 -070097void res_counter_uncharge(struct res_counter *counter, unsigned long val)
Pavel Emelianove552b662008-02-07 00:13:49 -080098{
99 unsigned long flags;
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800100 struct res_counter *c;
Pavel Emelianove552b662008-02-07 00:13:49 -0800101
Balbir Singh28dbc4b2009-01-07 18:08:05 -0800102 local_irq_save(flags);
103 for (c = counter; c != NULL; c = c->parent) {
104 spin_lock(&c->lock);
105 res_counter_uncharge_locked(c, val);
106 spin_unlock(&c->lock);
107 }
108 local_irq_restore(flags);
Pavel Emelianove552b662008-02-07 00:13:49 -0800109}
110
111
Balbir Singh0eea1032008-02-07 00:13:57 -0800112static inline unsigned long long *
113res_counter_member(struct res_counter *counter, int member)
Pavel Emelianove552b662008-02-07 00:13:49 -0800114{
115 switch (member) {
116 case RES_USAGE:
117 return &counter->usage;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -0700118 case RES_MAX_USAGE:
119 return &counter->max_usage;
Pavel Emelianove552b662008-02-07 00:13:49 -0800120 case RES_LIMIT:
121 return &counter->limit;
122 case RES_FAILCNT:
123 return &counter->failcnt;
Balbir Singh296c81d2009-09-23 15:56:36 -0700124 case RES_SOFT_LIMIT:
125 return &counter->soft_limit;
Pavel Emelianove552b662008-02-07 00:13:49 -0800126 };
127
128 BUG();
129 return NULL;
130}
131
132ssize_t res_counter_read(struct res_counter *counter, int member,
Balbir Singh0eea1032008-02-07 00:13:57 -0800133 const char __user *userbuf, size_t nbytes, loff_t *pos,
134 int (*read_strategy)(unsigned long long val, char *st_buf))
Pavel Emelianove552b662008-02-07 00:13:49 -0800135{
Balbir Singh0eea1032008-02-07 00:13:57 -0800136 unsigned long long *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800137 char buf[64], *s;
138
139 s = buf;
140 val = res_counter_member(counter, member);
Balbir Singh0eea1032008-02-07 00:13:57 -0800141 if (read_strategy)
142 s += read_strategy(*val, s);
143 else
144 s += sprintf(s, "%llu\n", *val);
Pavel Emelianove552b662008-02-07 00:13:49 -0800145 return simple_read_from_buffer((void __user *)userbuf, nbytes,
146 pos, buf, s - buf);
147}
148
#if BITS_PER_LONG == 32
/*
 * On 32-bit kernels a 64-bit load is not a single atomic access, so
 * take the counter's lock to read a torn-free value.
 */
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
/* 64-bit: an aligned 64-bit load cannot tear, no locking needed. */
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
Paul Menage2c7eabf2008-04-29 00:59:58 -0700167
Paul Menage856c13a2008-07-25 01:47:04 -0700168int res_counter_memparse_write_strategy(const char *buf,
169 unsigned long long *res)
Pavel Emelianove552b662008-02-07 00:13:49 -0800170{
Paul Menage856c13a2008-07-25 01:47:04 -0700171 char *end;
Daisuke Nishimurac5b947b2009-06-17 16:27:20 -0700172
173 /* return RESOURCE_MAX(unlimited) if "-1" is specified */
174 if (*buf == '-') {
175 *res = simple_strtoull(buf + 1, &end, 10);
176 if (*res != 1 || *end != '\0')
177 return -EINVAL;
178 *res = RESOURCE_MAX;
179 return 0;
180 }
181
Davidlohr Bueso52dcf8a2011-12-05 22:13:41 +0100182 *res = memparse(buf, &end);
Paul Menage856c13a2008-07-25 01:47:04 -0700183 if (*end != '\0')
184 return -EINVAL;
185
186 *res = PAGE_ALIGN(*res);
187 return 0;
188}
189
190int res_counter_write(struct res_counter *counter, int member,
191 const char *buf, write_strategy_fn write_strategy)
192{
193 char *end;
Balbir Singh0eea1032008-02-07 00:13:57 -0800194 unsigned long flags;
195 unsigned long long tmp, *val;
Pavel Emelianove552b662008-02-07 00:13:49 -0800196
Balbir Singh0eea1032008-02-07 00:13:57 -0800197 if (write_strategy) {
Paul Menage856c13a2008-07-25 01:47:04 -0700198 if (write_strategy(buf, &tmp))
199 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800200 } else {
201 tmp = simple_strtoull(buf, &end, 10);
202 if (*end != '\0')
Paul Menage856c13a2008-07-25 01:47:04 -0700203 return -EINVAL;
Balbir Singh0eea1032008-02-07 00:13:57 -0800204 }
205 spin_lock_irqsave(&counter->lock, flags);
Pavel Emelianove552b662008-02-07 00:13:49 -0800206 val = res_counter_member(counter, member);
207 *val = tmp;
Balbir Singh0eea1032008-02-07 00:13:57 -0800208 spin_unlock_irqrestore(&counter->lock, flags);
Paul Menage856c13a2008-07-25 01:47:04 -0700209 return 0;
Pavel Emelianove552b662008-02-07 00:13:49 -0800210}