/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

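/*
 * Initialize a counter: no limit or soft limit by default, and link it
 * to its parent (NULL for the root of a hierarchy).
 */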
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

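/*
 * Charge @val against a single counter. The caller must hold
 * counter->lock. Fails with -ENOMEM (and bumps failcnt) if the charge
 * would exceed the limit; otherwise usage and max_usage are updated.
 */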
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return 0;
}

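/*
 * Charge @val against @counter and every ancestor up the hierarchy.
 * If any level fails, the charges already taken are rolled back and
 * *@limit_fail_at points at the counter that hit its limit.
 */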
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	int ret;
	unsigned long flags;
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		ret = res_counter_charge_locked(c, val);
		spin_unlock(&c->lock);
		if (ret < 0) {
			*limit_fail_at = c;
			goto undo;
		}
	}
	ret = 0;
	goto done;
undo:
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
done:
	local_irq_restore(flags);
	return ret;
}

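/*
 * Uncharge @val from a single counter with counter->lock held.
 * @val is clamped to the current usage rather than letting it underflow.
 */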
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

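/*
 * Uncharge @val from @counter and every ancestor up the hierarchy.
 */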
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
}

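/*
 * Map a RES_* member id to the corresponding field of the counter.
 */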
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	};

	BUG();
	return NULL;
}

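/*
 * Read one member of the counter into a user buffer, formatted either
 * by the caller-supplied @read_strategy or as a plain decimal value.
 */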
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

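/*
 * On 32-bit systems a 64-bit member cannot be read atomically, so take
 * the counter lock; on 64-bit systems a plain load is sufficient.
 */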
#if BITS_PER_LONG == 32
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif

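/*
 * Parse a limit written by userspace: "-1" means unlimited
 * (RESOURCE_MAX), anything else goes through memparse() (so "k", "m"
 * and "g" suffixes work) and is rounded up to a page boundary.
 */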
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	/* FIXME - make memparse() take const char* args */
	*res = memparse((char *)buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}

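/*
 * Write a new value to one member of the counter, using
 * @write_strategy to parse the buffer when one is provided.
 */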
int res_counter_write(struct res_counter *counter, int member,
		      const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}