/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

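/*
 * res_counter_init - initialize a counter
 *
 * The counter starts out unlimited (both the hard and the soft limit
 * are RESOURCE_MAX) and is linked to @parent, which may be NULL for a
 * root counter.
 */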
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

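/*
 * res_counter_charge_locked - charge @val against a single counter
 *
 * The caller must hold counter->lock. If the new usage would exceed
 * the limit, failcnt is bumped and -ENOMEM is returned; with @force
 * the charge is applied anyway, so usage may end up above the limit.
 */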
int res_counter_charge_locked(struct res_counter *counter, unsigned long val,
			      bool force)
{
	int ret = 0;

	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		ret = -ENOMEM;
		if (!force)
			return ret;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return ret;
}

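/*
 * __res_counter_charge - charge @val up the counter hierarchy
 *
 * Walks from @counter to the root, charging each level. On the first
 * failure, *limit_fail_at is set to the counter that hit its limit.
 * Without @force, the charges already applied below the failing level
 * are rolled back and the error is returned; with @force every level
 * is charged unconditionally and the error is only reported.
 */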
static int __res_counter_charge(struct res_counter *counter, unsigned long val,
				struct res_counter **limit_fail_at, bool force)
{
	int ret, r;
	unsigned long flags;
	struct res_counter *c, *u;

	r = ret = 0;
	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		r = res_counter_charge_locked(c, val, force);
		spin_unlock(&c->lock);
		if (r < 0 && !ret) {
			ret = r;
			*limit_fail_at = c;
			if (!force)
				break;
		}
	}

	if (ret < 0 && !force) {
		for (u = counter; u != c; u = u->parent) {
			spin_lock(&u->lock);
			res_counter_uncharge_locked(u, val);
			spin_unlock(&u->lock);
		}
	}
	local_irq_restore(flags);

	return ret;
}

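/*
 * res_counter_charge - charge @val, failing if any ancestor is at its
 * limit. On failure the partial charges are rolled back and
 * *limit_fail_at points at the counter that rejected the charge.
 */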
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, false);
}

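/*
 * res_counter_charge_nofail - charge @val unconditionally. The charge
 * is applied at every level regardless of limits; a negative return
 * value merely reports that some counter was pushed over its limit.
 */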
int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
			      struct res_counter **limit_fail_at)
{
	return __res_counter_charge(counter, val, limit_fail_at, true);
}

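/*
 * res_counter_uncharge_locked - remove @val from a single counter
 *
 * The caller must hold counter->lock. An uncharge larger than the
 * current usage is a bug; it is clamped (and warned about) rather
 * than letting usage wrap. Returns the new usage.
 */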
u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
	return counter->usage;
}

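/*
 * res_counter_uncharge_until - uncharge @val from @counter and every
 * ancestor below @top, exclusive. Passing NULL for @top walks all the
 * way to the root. Returns the new usage of @counter itself.
 */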
u64 res_counter_uncharge_until(struct res_counter *counter,
			       struct res_counter *top,
			       unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;
	u64 ret = 0;

	local_irq_save(flags);
	for (c = counter; c != top; c = c->parent) {
		u64 r;

		spin_lock(&c->lock);
		r = res_counter_uncharge_locked(c, val);
		if (c == counter)
			ret = r;
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
	return ret;
}

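/*
 * res_counter_uncharge - uncharge @val from the whole hierarchy, from
 * @counter up to and including the root.
 */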
u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	return res_counter_uncharge_until(counter, NULL, val);
}

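/*
 * res_counter_member - map a RES_* member id onto the address of the
 * corresponding counter field. An unknown id is a bug.
 */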
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}

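/*
 * res_counter_read - format a counter member into @userbuf
 *
 * If the caller supplies a read_strategy callback it is used to format
 * the value; otherwise the raw number is printed. The value is read
 * without counter->lock, so on 32-bit this may observe a torn 64-bit
 * value (see res_counter_read_u64 below for the locked variant).
 */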
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

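/*
 * res_counter_read_u64 - read one member as a u64. On 32-bit a 64-bit
 * load is not atomic, so the counter lock is taken to avoid returning
 * a torn value; on 64-bit a plain read suffices.
 */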
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif

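/*
 * res_counter_memparse_write_strategy - parse a limit written by user
 * space. "-1" means unlimited (RESOURCE_MAX); anything else is parsed
 * with memparse(), so K/M/G suffixes are accepted, and the result is
 * rounded up to a page boundary.
 */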
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	*res = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}