#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/kernel_stat.h>

#include "sched.h"

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
        CPUACCT_STAT_USER,      /* ... user mode */
        CPUACCT_STAT_SYSTEM,    /* ... kernel mode */

        CPUACCT_STAT_NSTATS,
};

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds a pointer to a u64-type object on every cpu */
        u64 __percpu *cpuusage;
        struct kernel_cpustat __percpu *cpustat;
};

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
        return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
                            struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
                            struct cpuacct, css);
}

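/*
 * Return the parent cpu accounting group. Must not be called on the
 * root group, whose cgroup has no parent.
 */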
static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
{
        return cgroup_ca(ca->css.cgroup->parent);
}

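/* like __parent_ca(), but returns NULL for the root group */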
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
        if (!ca->css.cgroup->parent)
                return NULL;
        return cgroup_ca(ca->css.cgroup->parent);
}

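/*
 * Statically allocated storage for the root group, wired up in
 * cpuacct_init(); child groups allocate theirs in cpuacct_css_alloc().
 */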
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
static struct cpuacct root_cpuacct;

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
{
        struct cpuacct *ca;

        if (!cgrp->parent)
                return &root_cpuacct.css;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                goto out;

        ca->cpuusage = alloc_percpu(u64);
        if (!ca->cpuusage)
                goto out_free_ca;

        ca->cpustat = alloc_percpu(struct kernel_cpustat);
        if (!ca->cpustat)
                goto out_free_cpuusage;

        return &ca->css;

out_free_cpuusage:
        free_percpu(ca->cpuusage);
out_free_ca:
        kfree(ca);
out:
        return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_css_free(struct cgroup *cgrp)
{
        struct cpuacct *ca = cgroup_ca(cgrp);

        free_percpu(ca->cpustat);
        free_percpu(ca->cpuusage);
        kfree(ca);
}

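/* read the accumulated usage (in nanoseconds) of @ca on @cpu */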
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
        data = *cpuusage;
#endif

        return data;
}

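/* overwrite the accumulated usage of @ca on @cpu; only used to reset it */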
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
        *cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        u64 totalcpuusage = 0;
        int i;

        for_each_present_cpu(i)
                totalcpuusage += cpuacct_cpuusage_read(ca, i);

        return totalcpuusage;
}

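/*
 * Reset the group's usage counters on all cpus. Writing anything other
 * than 0 is rejected with -EINVAL.
 */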
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
                          u64 reset)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        int err = 0;
        int i;

        if (reset) {
                err = -EINVAL;
                goto out;
        }

        for_each_present_cpu(i)
                cpuacct_cpuusage_write(ca, i, 0);

out:
        return err;
}

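/* print the group's usage on each cpu as a space-separated list */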
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
                                   struct seq_file *m)
{
        struct cpuacct *ca = cgroup_ca(cgroup);
        u64 percpu;
        int i;

        for_each_present_cpu(i) {
                percpu = cpuacct_cpuusage_read(ca, i);
                seq_printf(m, "%llu ", (unsigned long long) percpu);
        }
        seq_printf(m, "\n");
        return 0;
}

static const char * const cpuacct_stat_desc[] = {
        [CPUACCT_STAT_USER] = "user",
        [CPUACCT_STAT_SYSTEM] = "system",
};

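/*
 * Report the group's accumulated user and system time in clock ticks
 * (USER_HZ). "user" covers USER + NICE time; "system" covers SYSTEM +
 * IRQ + SOFTIRQ time.
 */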
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
                              struct cgroup_map_cb *cb)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        int cpu;
        s64 val = 0;

        for_each_online_cpu(cpu) {
                struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
                val += kcpustat->cpustat[CPUTIME_USER];
                val += kcpustat->cpustat[CPUTIME_NICE];
        }
        val = cputime64_to_clock_t(val);
        cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);

        val = 0;
        for_each_online_cpu(cpu) {
                struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
                val += kcpustat->cpustat[CPUTIME_SYSTEM];
                val += kcpustat->cpustat[CPUTIME_IRQ];
                val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
        }

        val = cputime64_to_clock_t(val);
        cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);

        return 0;
}

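/*
 * Control files exported to userspace. With the subsystem name prefix
 * added by the cgroup core, they appear in each cgroup directory as
 * cpuacct.usage, cpuacct.usage_percpu and cpuacct.stat.
 *
 * Example (assuming the cpuacct hierarchy is mounted at
 * /sys/fs/cgroup/cpuacct):
 *
 *   # cat /sys/fs/cgroup/cpuacct/cpuacct.usage      - total usage in ns
 *   # echo 0 > /sys/fs/cgroup/cpuacct/cpuacct.usage - reset the counters
 */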
static struct cftype files[] = {
        {
                .name = "usage",
                .read_u64 = cpuusage_read,
                .write_u64 = cpuusage_write,
        },
        {
                .name = "usage_percpu",
                .read_seq_string = cpuacct_percpu_seq_read,
        },
        {
                .name = "stat",
                .read_map = cpuacct_stats_show,
        },
        { }     /* terminate */
};

/*
 * Charge this task's execution time to its accounting group and to
 * every ancestor group up to the root.
 *
 * Called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
        struct cpuacct *ca;
        int cpu;

        if (unlikely(!cpuacct_subsys.active))
                return;

        cpu = task_cpu(tsk);

        rcu_read_lock();

        ca = task_ca(tsk);

        while (true) {
                u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;

                ca = parent_ca(ca);
                if (!ca)
                        break;
        }

        rcu_read_unlock();
}

/*
 * Add user/system time to cpuacct.
 *
 * Note: it's the caller that updates the account of the root cgroup.
 */
void cpuacct_account_field(struct task_struct *p, int index, u64 val)
{
        struct kernel_cpustat *kcpustat;
        struct cpuacct *ca;

        if (unlikely(!cpuacct_subsys.active))
                return;

        rcu_read_lock();
        ca = task_ca(p);
        while (ca != &root_cpuacct) {
                kcpustat = this_cpu_ptr(ca->cpustat);
                kcpustat->cpustat[index] += val;
                ca = __parent_ca(ca);
        }
        rcu_read_unlock();
}

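/*
 * The root group reuses global counters: kernel_cpustat for the stat
 * fields and a statically allocated per-cpu u64 for cpuusage, so nothing
 * needs to be allocated dynamically at early init.
 */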
void __init cpuacct_init(void)
{
        root_cpuacct.cpustat = &kernel_cpustat;
        root_cpuacct.cpuusage = &root_cpuacct_cpuusage;
}

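/*
 * The cpuacct cgroup subsystem. base_cftypes registers the files above
 * in every cgroup of the cpuacct hierarchy.
 */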
struct cgroup_subsys cpuacct_subsys = {
        .name = "cpuacct",
        .css_alloc = cpuacct_css_alloc,
        .css_free = cpuacct_css_free,
        .subsys_id = cpuacct_subsys_id,
        .base_cftypes = files,
};