/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/fdtable.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

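/* Resolve a css, a cgroup or a task to the cgroup_cls_state embedding it. */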
static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return css_cls_state(cgroup_css(cgrp, net_cls_subsys_id));
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return css_cls_state(task_css(p, net_cls_subsys_id));
}

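/* Allocate per-cgroup classid state; the cgroup core takes the embedded css. */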
static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	return &cs->css;
}

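/* When a cgroup comes online, inherit the classid from its parent. */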
static int cgrp_css_online(struct cgroup_subsys_state *css)
{
	struct cgroup_cls_state *cs = css_cls_state(css);
	struct cgroup_cls_state *parent = css_cls_state(css_parent(css));

	if (parent)
		cs->classid = parent->classid;
	return 0;
}

static void cgrp_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_cls_state(css));
}

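/*
 * iterate_fd() callback: if the file is a socket, stamp the new classid
 * into its sock so sockets that are already open pick up the change.
 */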
static int update_classid(const void *v, struct file *file, unsigned n)
{
	int err;
	struct socket *sock = sock_from_file(file, &err);
	if (sock)
		sock->sk->sk_classid = (u32)(unsigned long)v;
	return 0;
}

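/*
 * When tasks are attached to the cgroup, walk their open files and update
 * the classid on every socket they own.
 */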
static void cgrp_attach(struct cgroup_subsys_state *css,
			struct cgroup_taskset *tset)
{
	struct task_struct *p;
	void *v;

	cgroup_taskset_for_each(p, css->cgroup, tset) {
		task_lock(p);
		v = (void *)(unsigned long)task_cls_classid(p);
		iterate_fd(p->files, 0, update_classid, v);
		task_unlock(p);
	}
}

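/* Read/write handlers backing the net_cls.classid control file. */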
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
	{ }	/* terminate */
};

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.css_alloc	= cgrp_css_alloc,
	.css_online	= cgrp_css_online,
	.css_free	= cgrp_css_free,
	.attach		= cgrp_attach,
	.subsys_id	= net_cls_subsys_id,
	.base_cftypes	= ss_files,
	.module		= THIS_MODULE,
};

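/*
 * Rough usage sketch (paths and handles below are illustrative only): write
 * a classid of the form 0xAAAABBBB (tc major:minor) to net_cls.classid and
 * attach a "cgroup" filter so traffic from tasks in that cgroup is steered
 * to the matching class, e.g.:
 *
 *	mkdir /sys/fs/cgroup/net_cls/crawler
 *	echo 0x00100001 > /sys/fs/cgroup/net_cls/crawler/net_cls.classid
 *	tc filter add dev eth0 parent 10: handle 1: cgroup
 */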
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

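/*
 * Classify a packet by the classid of the cgroup of the sending task,
 * falling back to the classid cached on the socket in softirq context.
 */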
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

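/*
 * Create or update the single filter instance: allocate the head on first
 * use, then validate and swap in the new extensions and ematch tree.
 */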
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
				&cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

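/* There is only ever one filter instance, so the walk reports just head. */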
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

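/* Dump the filter's handle, extensions and ematches back over netlink. */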
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.put		= cls_cgroup_put,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

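/*
 * Module init: load the net_cls cgroup subsystem first, then register the
 * "cgroup" classifier; unwind the subsystem if registration fails.
 */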
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");