/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
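
/*
 * Illustrative example (standard tc(8) u32 syntax, sketched here only to
 * show how rules map onto the structures below): a command roughly like
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 10.0.0.0/8 flowid 1:10
 *
 * becomes a single key node carrying one 32bit key/mask pair (value
 * 0x0a000000, mask 0xff000000 at the IPv4 destination word) hanging off
 * the root hash table, with TC_U32_TERMINAL set and the class id
 * recorded in its tcf_result.
 */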

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

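/*
 * A tc_u_knode is a single filter (key node), a tc_u_hnode is one hash
 * table whose buckets hold chains of key nodes, and a tc_u_common ties
 * together all hash tables attached to the same qdisc.
 */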
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char                     indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

static struct tc_u_common *u32_list;

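/*
 * Fold the masked key into a hash bucket index; fshift is precomputed
 * from the selector's hmask (see u32_change) so that the lowest set bit
 * of the mask ends up at bit 0 of the result.
 */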
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = ntohl(key & sel->hmask)>>fshift;

	return h;
}

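/*
 * Classification walks the hierarchy of hash tables: match every
 * key/mask pair of a node, descend into the table it links to (pushing
 * the node and the current packet offset onto a small stack), and when a
 * linked table yields no match, pop back to re-check the parent node's
 * terminal flag and continue with its siblings.  The stack depth is
 * bounded by TC_U32_MAXDEPTH to break misconfigured loops.
 */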
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb_network_header(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt +=1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {

			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] +=1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit +=1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel,n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb_tail_pointer(skb))
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}


static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

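/*
 * Allocate a hash table id: table ids occupy bits 20..31 of the handle
 * with the top bit forced on, and a bounded number of generator values
 * is tried before giving up and returning 0.
 */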
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht=tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}

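/*
 * Pick a node id for a new key within the bucket selected by handle:
 * one past the largest node id already seen there (never below 0x800),
 * clamped to 0xFFF, the maximum value of the node id field.
 */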
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}

static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV-1]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

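/*
 * Three cases are handled here: a non-NULL *arg modifies an existing key
 * node in place; a TCA_U32_DIVISOR attribute creates a new hash table;
 * otherwise a new key node is built from TCA_U32_SEL and linked into the
 * selected (or root) hash table.
 */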
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK-1]) {
		struct tc_u32_mark *mark;

		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
#ifdef CONFIG_CLS_U32_PERF
			kfree(n->pf);
#endif
			kfree(n);
			return -EINVAL;
		}
		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

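/*
 * Walk every hash table and key node belonging to this tcf_proto's
 * priority, calling arg->fn on each and honouring the walker's
 * skip/count bookkeeping.
 */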
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;

#ifdef CONFIG_NET_CLS_IND
		if(strlen(n->indev))
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	rta->rta_len = skb_tail_pointer(skb) - b;
	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops = {
	.next		=	NULL,
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on \n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured \n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");