#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contains common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/cgroups/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/cgroup.h>

/*
 * The core object. A cgroup that wishes to account for some
 * resource may include this counter into its structures and use
 * the helpers described below.
 */

struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage since the counter was created
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the limit that usage may exceed (the soft limit)
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};

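/*
 * Illustrative sketch (not part of this header): a controller that wants
 * its resource usage accounted for would typically embed a res_counter in
 * its per-cgroup state.  The "foo" names below are hypothetical and used
 * only for the example.
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state	css;
 *		struct res_counter		res;
 *	};
 */
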
#define RESOURCE_MAX (unsigned long long)LLONG_MAX

/**
 * Helpers to interact with userspace
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter: the counter in question
 * @member:  the field to work with (see RES_xxx below)
 * @buf:     the buffer to operate on,...
 * @nbytes:  its size...
 * @pos:     and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
		unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		const char *buffer, write_strategy_fn write_strategy);

/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

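/*
 * Illustrative sketch (not part of this header): reporting the current
 * usage through a cgroup read_u64 file callback.  The callback name and
 * the foo_from_cgroup() lookup are hypothetical.
 *
 *	static u64 foo_usage_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		struct foo_cgroup *foo = foo_from_cgroup(cgrp);
 *
 *		return res_counter_read_u64(&foo->res, RES_USAGE);
 *	}
 */
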
/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);

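/*
 * Illustrative sketch (not part of this header): a controller that supports
 * hierarchical accounting initializes a child's counter with its parent's
 * counter, and passes NULL for the root.  The "foo" names are hypothetical.
 *
 *	res_counter_init(&root_foo->res, NULL);
 *	res_counter_init(&child_foo->res, &root_foo->res);
 */
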
/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if counter->usage would exceed
 * counter->limit. The _locked call expects counter->lock to be held.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);

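/*
 * Illustrative sketch (not part of this header): charging a counter in the
 * hierarchical case.  On failure, *limit_fail_at identifies the counter
 * (this one or an ancestor) whose limit rejected the charge.  The "foo"
 * names and the -ENOMEM return value are hypothetical.
 *
 *	static int foo_try_charge(struct foo_cgroup *foo, unsigned long val)
 *	{
 *		struct res_counter *fail_res;
 *
 *		if (res_counter_charge(&foo->res, val, &fail_res))
 *			return -ENOMEM;
 *		return 0;
 *	}
 */
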
/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console
 * the _locked call expects counter->lock to be held
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);

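/*
 * Illustrative sketch (not part of this header): releasing a previously
 * charged amount, e.g. on an error path or when the resource is freed;
 * the value must match what was charged earlier.
 *
 *	res_counter_uncharge(&foo->res, val);
 */
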
static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->limit)
		return true;

	return false;
}

static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->soft_limit)
		return true;

	return false;
}

/**
 * Get the difference between the usage and the soft limit
 * @cnt: The counter
 *
 * Returns 0 if usage is less than or equal to the soft limit,
 * or the difference between usage and the soft limit otherwise.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}

/*
 * Helper function to detect if the cgroup is within its limit or
 * not. It is currently called from cgroup_rss_prepare()
 */
static inline bool res_counter_check_under_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_soft_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

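/*
 * Illustrative sketch (not part of this header): the reset helpers are
 * typically wired to write-only cgroup files so that userspace can clear
 * the usage watermark and the failure counter.  The callback name, the
 * foo_from_cgroup() lookup and the event encoding are hypothetical.
 *
 *	static int foo_reset(struct cgroup *cgrp, unsigned int event)
 *	{
 *		struct foo_cgroup *foo = foo_from_cgroup(cgrp);
 *
 *		if (event == RES_MAX_USAGE)
 *			res_counter_reset_max(&foo->res);
 *		else
 *			res_counter_reset_failcnt(&foo->res);
 *		return 0;
 *	}
 */
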
static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}

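/*
 * Illustrative sketch (not part of this header): applying a new hard limit
 * from a cgroup write_string file callback, parsing values such as "4096",
 * "64K" or "1G" with the memparse-based write strategy.  The callback and
 * lookup names are hypothetical; res_counter_set_limit() returns -EBUSY if
 * the current usage already exceeds the requested limit.
 *
 *	static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *				   const char *buf)
 *	{
 *		struct foo_cgroup *foo = foo_from_cgroup(cgrp);
 *		unsigned long long val;
 *		int ret;
 *
 *		ret = res_counter_memparse_write_strategy(buf, &val);
 *		if (ret)
 *			return ret;
 *		return res_counter_set_limit(&foo->res, val);
 *	}
 */
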
static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
				unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}

#endif