/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

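/*
 * Overview (summary comment; the shell commands below are illustrative
 * only, the device name, mount point and classid values are examples and
 * not requirements of this module):
 *
 * Each cgroup in a "net_cls" hierarchy carries a 32-bit classid exposed
 * through the net_cls.classid control file.  The "cgroup" tc filter
 * implemented here matches packets against the classid of the task (or
 * socket) that generated them, e.g.:
 *
 *	mount -t cgroup -o net_cls net_cls /mnt/net_cls
 *	mkdir /mnt/net_cls/foo
 *	echo 0x100001 > /mnt/net_cls/foo/net_cls.classid	(tc class 10:1)
 *	tc qdisc add dev eth0 root handle 10: htb
 *	tc class add dev eth0 parent 10: classid 10:1 htb rate 40mbit
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 */
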
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
	.populate	= cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
	.subsys_id	= net_cls_subsys_id,
#endif
	.module		= THIS_MODULE,
};

static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

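/*
 * Allocate the per-cgroup state.  A newly created cgroup starts out with
 * its parent's classid, so descendants inherit the classification until
 * they are given one of their own.
 */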
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

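/*
 * Per-tcf_proto state: the single filter instance's handle together with
 * its extensions (actions/policing) and its ematch tree.
 */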
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

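/*
 * Classify by the classid of the sending task's net_cls cgroup (or, when
 * called from softirq context, by the socket's cached sk_classid), then
 * apply the configured ematches and actions.
 */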
static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq-based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

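/*
 * The classifier keeps exactly one implicit element (the head hanging off
 * tp->root), so there is nothing for get/put to look up or release and
 * nothing for init to set up.
 */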
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

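/*
 * Allocate the head on first use, then validate the netlink options
 * (extensions and ematches) and swap the new configuration into place.
 */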
static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

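/*
 * The implicit element cannot be removed on its own; it only goes away
 * when the whole tcf_proto is destroyed.
 */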
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

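/*
 * Register the net_cls cgroup subsystem first, then the "cgroup" tc
 * classifier that consumes its classid.  When built as a module (i.e.
 * CONFIG_NET_CLS_CGROUP is not set at compile time), the subsystem id is
 * only known after cgroup_load_subsys() and has to be published for the
 * inline helpers in net/cls_cgroup.h.
 */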
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

#ifndef CONFIG_NET_CLS_CGROUP
	/* We can't use rcu_assign_pointer because this is an int. */
	smp_wmb();
	net_cls_subsys_id = net_cls_subsys.subsys_id;
#endif

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

#ifndef CONFIG_NET_CLS_CGROUP
	net_cls_subsys_id = -1;
	synchronize_rcu();
#endif

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");