/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>

void res_counter_init(struct res_counter *counter)
{
	spin_lock_init(&counter->lock);
	counter->limit = (unsigned long long)LLONG_MAX;
}

/* Charge @val against @counter.  Caller must hold counter->lock. */
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	return 0;
}

int res_counter_charge(struct res_counter *counter, unsigned long val)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&counter->lock, flags);
	ret = res_counter_charge_locked(counter, val);
	spin_unlock_irqrestore(&counter->lock, flags);
	return ret;
}

/* Uncharge @val from @counter.  Caller must hold counter->lock. */
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;

	spin_lock_irqsave(&counter->lock, flags);
	res_counter_uncharge_locked(counter, val);
	spin_unlock_irqrestore(&counter->lock, flags);
}
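
/*
 * Usage sketch (illustrative only; the "foo" names below are hypothetical
 * and not part of this file): a controller embeds a res_counter in its
 * per-cgroup state, initializes it once, and brackets every tracked
 * allocation with a charge/uncharge pair; res_counter_charge() returns
 * -ENOMEM when the configured limit would be exceeded.
 *
 *	struct foo_cgroup {
 *		struct res_counter res;
 *	};
 *
 *	static void foo_cgroup_init(struct foo_cgroup *foo)
 *	{
 *		res_counter_init(&foo->res);
 *	}
 *
 *	static int foo_account_alloc(struct foo_cgroup *foo, unsigned long size)
 *	{
 *		return res_counter_charge(&foo->res, size);
 *	}
 *
 *	static void foo_account_free(struct foo_cgroup *foo, unsigned long size)
 *	{
 *		res_counter_uncharge(&foo->res, size);
 *	}
 */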

static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	}

	BUG();
	return NULL;
}

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}

ssize_t res_counter_write(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*write_strategy)(char *st_buf, unsigned long long *val))
{
	int ret;
	char *buf, *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	buf = kmalloc(nbytes + 1, GFP_KERNEL);
	ret = -ENOMEM;
	if (buf == NULL)
		goto out;

	buf[nbytes] = '\0';
	ret = -EFAULT;
	if (copy_from_user(buf, userbuf, nbytes))
		goto out_free;

	ret = -EINVAL;

	strstrip(buf);
	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			goto out_free;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			goto out_free;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	ret = nbytes;
out_free:
	kfree(buf);
out:
	return ret;
}
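
/*
 * Usage sketch (illustrative only; the "foo_*" handlers are hypothetical and
 * not part of this file): a controller's cgroup file methods can forward to
 * the helpers above.  Passing a write_strategy lets the controller accept
 * suffixed values such as "64K" via memparse(); passing NULL falls back to
 * plain decimal parsing, and reads of a single member can simply use
 * res_counter_read_u64().
 *
 *	static int foo_write_strategy(char *buf, unsigned long long *tmp)
 *	{
 *		*tmp = memparse(buf, &buf);
 *		if (*buf != '\0')
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 *	static ssize_t foo_limit_write(struct res_counter *res,
 *			const char __user *userbuf, size_t nbytes, loff_t *pos)
 *	{
 *		return res_counter_write(res, RES_LIMIT, userbuf, nbytes,
 *					 pos, foo_write_strategy);
 *	}
 *
 *	static u64 foo_usage_read(struct res_counter *res)
 *	{
 *		return res_counter_read_u64(res, RES_USAGE);
 *	}
 */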