/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
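/* Illustration of the numbering above (assuming TC_HTB_MAXDEPTH == 8, the
   value exported by pkt_sched.h): a root class always sits at level 7 and
   every leaf at level 0, regardless of how deep the configured hierarchy
   really is; only the interior levels in between may stay unused. */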

#define HTB_HSIZE 16		/* classid hash size */
#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_RATECM 1		/* whether to use rate computer */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

#ifdef HTB_RATECM
	/* rate measurement counters */
	unsigned long rate_bytes, sum_bytes;
	unsigned long rate_packets, sum_packets;
#endif

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When a class changes from state 1->2 and disconnects
			   from its parent's feed, we lose the ptr value and
			   start from the first child again. Here we store the
			   classid of the last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about non work conserving .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */

	int prio;		/* a return from parent to leaf is possible here */
	int quantum;		/* we keep a backup. Eventually a full replacement */
				/* of the un.leaf originals should be done. */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
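/* Note on L2T ("length to time") below: the qdisc_rate_table supplied by
   userspace TC maps a packet size to its transmission time in scheduler
   ticks.  The table has 256 slots indexed by size >> cell_log; anything
   past the last slot is clamped and counted in xstats.giants. */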
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255) {
		cl->xstats.giants++;
		slot = 255;
	}
	return rate->data[slot];
}

struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	/* whether we hit non-work conserving class during this dequeue; we use */
	int nwc_hit;		/* this to disable mindelay complaint in dequeue */

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;
#ifdef HTB_RATECM
	struct timer_list rttim;	/* rate computer timer */
	int recmp_bucket;	/* which hash bucket to recompute next */
#endif

	/* non shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in skb->priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful we finish and return the direct queue.
 */
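/* Magic non-NULL pointer value returned by htb_classify() to say "bypass
   shaping and use the direct queue" (distinct from NULL, which means drop). */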
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return HTB_DIRECT;
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use so
				   reset the bit in mask as parent is
				   already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to from
				   parent feed - forget the pointer but remember
				   classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there is a hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can safely be called on an already active leaf.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

#ifdef HTB_RATECM
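/* RT_GEN folds the last period's byte/packet sum D into the decaying
   accumulator R (R loses a 1/HTB_EWMAC fraction of itself each run) and
   then resets the sum; htb_rate_timer() below runs it once per second
   over one hash bucket's worth of classes. */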
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	/* lock queue so that we can muck with it */
	spin_lock_bh(&sch->dev->queue_lock);

	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);

	/* scan and recompute one bucket at a time */
	if (++q->recmp_bucket >= HTB_HSIZE)
		q->recmp_bucket = 0;

	hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
		RT_GEN(cl->sum_bytes, cl->rate_bytes);
		RT_GEN(cl->sum_packets, cl->rate_packets);
	}
	spin_unlock_bh(&sch->dev->queue_lock);
}
#endif

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock here than the event
 * queue does. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, int bytes)
{
	long toks, diff;
	enum htb_cmode old_mode;

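/* HTB_ACCNT below: refill bucket T (tokens or ctokens) by the elapsed time
   "diff", clamp it at the bucket size B, then charge the packet's
   transmission time taken from rate table R; the result is floored at
   1 - mbuffer so a single huge packet cannot leave the class in debt for
   longer than the maximum wait time. */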
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks

	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}
#ifdef HTB_RATECM
		/* update rate counters */
		cl->sum_bytes += bytes;
		cl->sum_packets++;
#endif

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets++;
		}
		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 if there is no event in pq).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
	int i;

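	/* Bound the work done per call: at most 500 expired events are
	   handled here.  If more are still pending we warn and return a
	   time 0.1 s ahead so the remaining events get picked up later. */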
	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events!\n");
	return q->now + PSCHED_TICKS_PER_SEC / 10;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL is returned if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf that the current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 | BUG_TRAP(tree->rb_node); | 
|  | 840 | sp->root = tree->rb_node; | 
|  | 841 | sp->pptr = pptr; | 
|  | 842 | sp->pid = pid; | 
|  | 843 |  | 
|  | 844 | for (i = 0; i < 65535; i++) { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 845 | if (!*sp->pptr && *sp->pid) { | 
| YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 846 | /* ptr was invalidated but id is valid - try to recover | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 847 | the original or next ptr */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 848 | *sp->pptr = | 
|  | 849 | htb_id_find_next_upper(prio, sp->root, *sp->pid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 850 | } | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 851 | *sp->pid = 0;	/* ptr is valid now so that remove this hint as it | 
|  | 852 | can become out of date quickly */ | 
|  | 853 | if (!*sp->pptr) {	/* we are at right end; rewind & go up */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 854 | *sp->pptr = sp->root; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 855 | while ((*sp->pptr)->rb_left) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 856 | *sp->pptr = (*sp->pptr)->rb_left; | 
|  | 857 | if (sp > stk) { | 
|  | 858 | sp--; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 859 | BUG_TRAP(*sp->pptr); | 
|  | 860 | if (!*sp->pptr) | 
|  | 861 | return NULL; | 
|  | 862 | htb_next_rb_node(sp->pptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 | } | 
|  | 864 | } else { | 
|  | 865 | struct htb_class *cl; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 866 | cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); | 
|  | 867 | if (!cl->level) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | return cl; | 
|  | 869 | (++sp)->root = cl->un.inner.feed[prio].rb_node; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 870 | sp->pptr = cl->un.inner.ptr + prio; | 
|  | 871 | sp->pid = cl->un.inner.last_ptr_id + prio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | } | 
|  | 873 | } | 
|  | 874 | BUG_TRAP(0); | 
|  | 875 | return NULL; | 
|  | 876 | } | 
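
htb_lookup_leaf() walks a hierarchy of rbtrees (one feed tree per inner class) without recursion: stk[] is an explicit stack with one slot per tree level, and each slot either resumes from a saved pointer or rewinds to the leftmost node when that pointer has been invalidated. A self-contained sketch of the same explicit-stack descent over a generic tree; all names below are illustrative, and the sketch deliberately omits the resume/hint machinery:

	#include <stddef.h>

	struct tnode {
		int is_leaf;
		struct tnode *child;	/* leftmost child of an inner node */
		struct tnode *next;	/* right sibling */
	};

	#define MAXDEPTH 8

	/* Depth-first search for the first leaf using an explicit
	   stack instead of recursion -- the same trick that lets
	   htb_lookup_leaf() suspend the walk between dequeues. */
	static struct tnode *first_leaf(struct tnode *root)
	{
		struct tnode *stk[MAXDEPTH];
		int sp = 0;

		stk[sp] = root;
		while (sp >= 0) {
			struct tnode *n = stk[sp];

			if (!n) {		/* level exhausted; go up */
				sp--;
				if (sp >= 0)
					stk[sp] = stk[sp]->next;
				continue;
			}
			if (n->is_leaf)
				return n;
			if (sp + 1 >= MAXDEPTH)
				return NULL;	/* deeper than allowed */
			stk[++sp] = n->child;	/* descend */
		}
		return NULL;
	}

Keeping the stack explicit is what allows HTB to park the walk in the per-class ptr/last_ptr_id hints and restart it on the next dequeue.
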
|  | 877 |  | 
|  | 878 | /* dequeues packet at given priority and level; call only if | 
|  | 879 | you are sure that there is an active class at prio/level */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 880 | static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, | 
|  | 881 | int level) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 882 | { | 
|  | 883 | struct sk_buff *skb = NULL; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 884 | struct htb_class *cl, *start; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 885 | /* look initial class up in the row */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 886 | start = cl = htb_lookup_leaf(q->row[level] + prio, prio, | 
|  | 887 | q->ptr[level] + prio, | 
|  | 888 | q->last_ptr_id[level] + prio); | 
|  | 889 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 890 | do { | 
|  | 891 | next: | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 892 | BUG_TRAP(cl); | 
|  | 893 | if (!cl) | 
|  | 894 | return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 |  | 
|  | 896 | /* class can be empty - it is unlikely but can be true if the leaf | 
|  | 897 | qdisc drops packets in its enqueue routine or if someone used | 
| YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 898 | the graft operation on the leaf since the last dequeue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | simply deactivate and skip such a class */ | 
|  | 900 | if (unlikely(cl->un.leaf.q->q.qlen == 0)) { | 
|  | 901 | struct htb_class *next; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 902 | htb_deactivate(q, cl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 |  | 
|  | 904 | /* row/level might become empty */ | 
|  | 905 | if ((q->row_mask[level] & (1 << prio)) == 0) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 906 | return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 907 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 908 | next = htb_lookup_leaf(q->row[level] + prio, | 
|  | 909 | prio, q->ptr[level] + prio, | 
|  | 910 | q->last_ptr_id[level] + prio); | 
|  | 911 |  | 
|  | 912 | if (cl == start)	/* fix start if we just deleted it */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | start = next; | 
|  | 914 | cl = next; | 
|  | 915 | goto next; | 
|  | 916 | } | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 917 |  | 
|  | 918 | skb = cl->un.leaf.q->dequeue(cl->un.leaf.q); | 
|  | 919 | if (likely(skb != NULL)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | break; | 
|  | 921 | if (!cl->warned) { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 922 | printk(KERN_WARNING | 
|  | 923 | "htb: class %X isn't work conserving ?!\n", | 
|  | 924 | cl->classid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 925 | cl->warned = 1; | 
|  | 926 | } | 
|  | 927 | q->nwc_hit++; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 928 | htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> | 
|  | 929 | ptr[0]) + prio); | 
|  | 930 | cl = htb_lookup_leaf(q->row[level] + prio, prio, | 
|  | 931 | q->ptr[level] + prio, | 
|  | 932 | q->last_ptr_id[level] + prio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 |  | 
|  | 934 | } while (cl != start); | 
|  | 935 |  | 
|  | 936 | if (likely(skb != NULL)) { | 
|  | 937 | if ((cl->un.leaf.deficit[level] -= skb->len) < 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 938 | cl->un.leaf.deficit[level] += cl->un.leaf.quantum; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 939 | htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> | 
|  | 940 | ptr[0]) + prio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 941 | } | 
|  | 942 | /* this used to be after charge_class but this ordering | 
|  | 943 | gives us slightly better performance */ | 
|  | 944 | if (!cl->un.leaf.q->q.qlen) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 945 | htb_deactivate(q, cl); | 
|  | 946 | htb_charge_class(q, cl, level, skb->len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 947 | } | 
|  | 948 | return skb; | 
|  | 949 | } | 
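
The deficit handling above is classic deficit round robin: a leaf keeps the turn while its deficit lasts, and once the deficit goes negative it is topped up by one quantum and the round-robin pointer moves to the next leaf. A minimal sketch of that accounting (drr_queue and drr_charge are illustrative names, not kernel API):

	/* Deficit round robin over a ring of queues, as in
	   htb_dequeue_tree(): a queue keeps sending until its deficit
	   is used up, then it is refilled and the pointer moves on. */
	struct drr_queue {
		int deficit;
		int quantum;
		struct drr_queue *next;	/* circular list of active queues */
	};

	static struct drr_queue *drr_charge(struct drr_queue *cur, int pkt_len)
	{
		cur->deficit -= pkt_len;
		if (cur->deficit < 0) {
			cur->deficit += cur->quantum;	/* refill for next round */
			cur = cur->next;		/* advance round robin */
		}
		return cur;	/* queue to serve next */
	}

With equal quanta this degenerates to plain round robin; because HTB derives the quantum from the class rate, classes of the same priority share excess bandwidth roughly in proportion to their rates.
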
|  | 950 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 951 | static struct sk_buff *htb_dequeue(struct Qdisc *sch) | 
|  | 952 | { | 
|  | 953 | struct sk_buff *skb = NULL; | 
|  | 954 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 955 | int level; | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 956 | psched_time_t next_event; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 |  | 
|  | 958 | /* try to dequeue direct packets as high prio (!) to minimize cpu work */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 959 | skb = __skb_dequeue(&q->direct_queue); | 
|  | 960 | if (skb != NULL) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | sch->flags &= ~TCQ_F_THROTTLED; | 
|  | 962 | sch->q.qlen--; | 
|  | 963 | return skb; | 
|  | 964 | } | 
|  | 965 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 966 | if (!sch->q.qlen) | 
|  | 967 | goto fin; | 
| Patrick McHardy | 3bebcda | 2007-03-23 11:29:25 -0700 | [diff] [blame] | 968 | q->now = psched_get_time(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 |  | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 970 | next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | q->nwc_hit = 0; | 
|  | 972 | for (level = 0; level < TC_HTB_MAXDEPTH; level++) { | 
|  | 973 | /* common case optimization - skip event handler quickly */ | 
|  | 974 | int m; | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 975 | psched_time_t event; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 976 |  | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 977 | if (q->now >= q->near_ev_cache[level]) { | 
|  | 978 | event = htb_do_events(q, level); | 
|  | 979 | q->near_ev_cache[level] = event ? event : | 
|  | 980 | PSCHED_TICKS_PER_SEC; | 
|  | 981 | } else | 
|  | 982 | event = q->near_ev_cache[level]; | 
|  | 983 |  | 
|  | 984 | if (event && next_event > event) | 
|  | 985 | next_event = event; | 
|  | 986 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | m = ~q->row_mask[level]; | 
|  | 988 | while (m != (int)(-1)) { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 989 | int prio = ffz(m); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 990 | m |= 1 << prio; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 991 | skb = htb_dequeue_tree(q, prio, level); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | if (likely(skb != NULL)) { | 
|  | 993 | sch->q.qlen--; | 
|  | 994 | sch->flags &= ~TCQ_F_THROTTLED; | 
|  | 995 | goto fin; | 
|  | 996 | } | 
|  | 997 | } | 
|  | 998 | } | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 999 | sch->qstats.overlimits++; | 
|  | 1000 | qdisc_watchdog_schedule(&q->watchdog, next_event); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 | fin: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | return skb; | 
|  | 1003 | } | 
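
The inner loop above serves priorities best-first: row_mask[level] has one bit set per priority with active classes, and complementing it lets ffz() (find first zero) return the lowest set bit of the original mask. A user-space twin of that scan, using a GCC-style builtin in place of the kernel's ffz(); scan_prios is an illustrative name:

	/* Enumerate set bits of a priority mask from least significant
	   (the best priority) upward, mirroring the ffz()/row_mask
	   loop in htb_dequeue(). */
	static void scan_prios(unsigned int row_mask)
	{
		unsigned int m = row_mask;

		while (m) {
			int prio = __builtin_ctz(m);	/* lowest set bit */
			m &= m - 1;			/* clear that bit */
			/* ... try to dequeue at 'prio' here ... */
			(void)prio;
		}
	}
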
|  | 1004 |  | 
|  | 1005 | /* try to drop from each class (by prio) until one succeeds */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1006 | static unsigned int htb_drop(struct Qdisc *sch) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | { | 
|  | 1008 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 1009 | int prio; | 
|  | 1010 |  | 
|  | 1011 | for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) { | 
|  | 1012 | struct list_head *p; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1013 | list_for_each(p, q->drops + prio) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | struct htb_class *cl = list_entry(p, struct htb_class, | 
|  | 1015 | un.leaf.drop_list); | 
|  | 1016 | unsigned int len; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1017 | if (cl->un.leaf.q->ops->drop && | 
|  | 1018 | (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | sch->q.qlen--; | 
|  | 1020 | if (!cl->un.leaf.q->q.qlen) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1021 | htb_deactivate(q, cl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1022 | return len; | 
|  | 1023 | } | 
|  | 1024 | } | 
|  | 1025 | } | 
|  | 1026 | return 0; | 
|  | 1027 | } | 
|  | 1028 |  | 
|  | 1029 | /* reset all classes */ | 
|  | 1030 | /* always called under BH & queue lock */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1031 | static void htb_reset(struct Qdisc *sch) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | { | 
|  | 1033 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 1034 | int i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 |  | 
|  | 1036 | for (i = 0; i < HTB_HSIZE; i++) { | 
| Stephen Hemminger | 0cef296 | 2006-08-10 23:35:38 -0700 | [diff] [blame] | 1037 | struct hlist_node *p; | 
|  | 1038 | struct htb_class *cl; | 
|  | 1039 |  | 
|  | 1040 | hlist_for_each_entry(cl, p, q->hash + i, hlist) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1041 | if (cl->level) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1042 | memset(&cl->un.inner, 0, sizeof(cl->un.inner)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | else { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1044 | if (cl->un.leaf.q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1045 | qdisc_reset(cl->un.leaf.q); | 
|  | 1046 | INIT_LIST_HEAD(&cl->un.leaf.drop_list); | 
|  | 1047 | } | 
|  | 1048 | cl->prio_activity = 0; | 
|  | 1049 | cl->cmode = HTB_CAN_SEND; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 |  | 
|  | 1051 | } | 
|  | 1052 | } | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 1053 | qdisc_watchdog_cancel(&q->watchdog); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | __skb_queue_purge(&q->direct_queue); | 
|  | 1055 | sch->q.qlen = 0; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1056 | memset(q->row, 0, sizeof(q->row)); | 
|  | 1057 | memset(q->row_mask, 0, sizeof(q->row_mask)); | 
|  | 1058 | memset(q->wait_pq, 0, sizeof(q->wait_pq)); | 
|  | 1059 | memset(q->ptr, 0, sizeof(q->ptr)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | for (i = 0; i < TC_HTB_NUMPRIO; i++) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1061 | INIT_LIST_HEAD(q->drops + i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1062 | } | 
|  | 1063 |  | 
|  | 1064 | static int htb_init(struct Qdisc *sch, struct rtattr *opt) | 
|  | 1065 | { | 
|  | 1066 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 1067 | struct rtattr *tb[TCA_HTB_INIT]; | 
|  | 1068 | struct tc_htb_glob *gopt; | 
|  | 1069 | int i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 | if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) || | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1071 | tb[TCA_HTB_INIT - 1] == NULL || | 
|  | 1072 | RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | printk(KERN_ERR "HTB: hey, you probably have a bad tc tool\n"); | 
|  | 1074 | return -EINVAL; | 
|  | 1075 | } | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1076 | gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | if (gopt->version != HTB_VER >> 16) { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1078 | printk(KERN_ERR | 
|  | 1079 | "HTB: need tc/htb version %d (minor is %d), you have %d\n", | 
|  | 1080 | HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | return -EINVAL; | 
|  | 1082 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 |  | 
|  | 1084 | INIT_LIST_HEAD(&q->root); | 
|  | 1085 | for (i = 0; i < HTB_HSIZE; i++) | 
| Stephen Hemminger | 0cef296 | 2006-08-10 23:35:38 -0700 | [diff] [blame] | 1086 | INIT_HLIST_HEAD(q->hash + i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | for (i = 0; i < TC_HTB_NUMPRIO; i++) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1088 | INIT_LIST_HEAD(q->drops + i); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 |  | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 1090 | qdisc_watchdog_init(&q->watchdog, sch); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 | skb_queue_head_init(&q->direct_queue); | 
|  | 1092 |  | 
|  | 1093 | q->direct_qlen = sch->dev->tx_queue_len; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1094 | if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | q->direct_qlen = 2; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 |  | 
|  | 1097 | #ifdef HTB_RATECM | 
|  | 1098 | init_timer(&q->rttim); | 
|  | 1099 | q->rttim.function = htb_rate_timer; | 
|  | 1100 | q->rttim.data = (unsigned long)sch; | 
|  | 1101 | q->rttim.expires = jiffies + HZ; | 
|  | 1102 | add_timer(&q->rttim); | 
|  | 1103 | #endif | 
|  | 1104 | if ((q->rate2quantum = gopt->rate2quantum) < 1) | 
|  | 1105 | q->rate2quantum = 1; | 
|  | 1106 | q->defcls = gopt->defcls; | 
|  | 1107 |  | 
|  | 1108 | return 0; | 
|  | 1109 | } | 
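
htb_init() expects its configuration as a nested netlink attribute: the TCA_OPTIONS payload is itself a sequence of length/type records, one of which (TCA_HTB_INIT) must carry a struct tc_htb_glob of at least the expected size. A rough user-space sketch of walking such a type-length-value stream; the layout is simplified (real rtattr records are additionally padded to 4-byte alignment), and struct tlv/tlv_find are illustrative names:

	#include <stddef.h>
	#include <stdint.h>

	/* Simplified TLV record, loosely modelled on struct rtattr. */
	struct tlv {
		uint16_t len;	/* total record length, header included */
		uint16_t type;
	};

	/* Find the first record of the wanted type, or NULL. */
	static const struct tlv *tlv_find(const void *buf, size_t size, int type)
	{
		const uint8_t *p = buf;

		while (size >= sizeof(struct tlv)) {
			const struct tlv *t = (const struct tlv *)p;

			if (t->len < sizeof(*t) || t->len > size)
				return NULL;	/* malformed stream */
			if (t->type == type)
				return t;
			p += t->len;
			size -= t->len;
		}
		return NULL;
	}

As in htb_init(), a caller would still have to verify that the record's payload is large enough for the structure it expects before dereferencing it.
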
|  | 1110 |  | 
|  | 1111 | static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) | 
|  | 1112 | { | 
|  | 1113 | struct htb_sched *q = qdisc_priv(sch); | 
| Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1114 | unsigned char *b = skb_tail_pointer(skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1115 | struct rtattr *rta; | 
|  | 1116 | struct tc_htb_glob gopt; | 
| Stephen Hemminger | 9ac961e | 2006-08-10 23:33:16 -0700 | [diff] [blame] | 1117 | spin_lock_bh(&sch->dev->queue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | gopt.direct_pkts = q->direct_pkts; | 
|  | 1119 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | gopt.version = HTB_VER; | 
|  | 1121 | gopt.rate2quantum = q->rate2quantum; | 
|  | 1122 | gopt.defcls = q->defcls; | 
| Stephen Hemminger | 3bf7295 | 2006-08-10 23:31:08 -0700 | [diff] [blame] | 1123 | gopt.debug = 0; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1124 | rta = (struct rtattr *)b; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 
|  | 1126 | RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); | 
| Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1127 | rta->rta_len = skb_tail_pointer(skb) - b; | 
| Stephen Hemminger | 9ac961e | 2006-08-10 23:33:16 -0700 | [diff] [blame] | 1128 | spin_unlock_bh(&sch->dev->queue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | return skb->len; | 
|  | 1130 | rtattr_failure: | 
| Stephen Hemminger | 9ac961e | 2006-08-10 23:33:16 -0700 | [diff] [blame] | 1131 | spin_unlock_bh(&sch->dev->queue_lock); | 
| Arnaldo Carvalho de Melo | dc5fc57 | 2007-03-25 23:06:12 -0700 | [diff] [blame] | 1132 | nlmsg_trim(skb, skb_tail_pointer(skb)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | return -1; | 
|  | 1134 | } | 
|  | 1135 |  | 
|  | 1136 | static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1137 | struct sk_buff *skb, struct tcmsg *tcm) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1139 | struct htb_class *cl = (struct htb_class *)arg; | 
| Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1140 | unsigned char *b = skb_tail_pointer(skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | struct rtattr *rta; | 
|  | 1142 | struct tc_htb_opt opt; | 
|  | 1143 |  | 
| Stephen Hemminger | 9ac961e | 2006-08-10 23:33:16 -0700 | [diff] [blame] | 1144 | spin_lock_bh(&sch->dev->queue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT; | 
|  | 1146 | tcm->tcm_handle = cl->classid; | 
|  | 1147 | if (!cl->level && cl->un.leaf.q) | 
|  | 1148 | tcm->tcm_info = cl->un.leaf.q->handle; | 
|  | 1149 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1150 | rta = (struct rtattr *)b; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | RTA_PUT(skb, TCA_OPTIONS, 0, NULL); | 
|  | 1152 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1153 | memset(&opt, 0, sizeof(opt)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1155 | opt.rate = cl->rate->rate; | 
|  | 1156 | opt.buffer = cl->buffer; | 
|  | 1157 | opt.ceil = cl->ceil->rate; | 
|  | 1158 | opt.cbuffer = cl->cbuffer; | 
|  | 1159 | opt.quantum = cl->un.leaf.quantum; | 
|  | 1160 | opt.prio = cl->un.leaf.prio; | 
|  | 1161 | opt.level = cl->level; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); | 
| Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1163 | rta->rta_len = skb_tail_pointer(skb) - b; | 
| Stephen Hemminger | 9ac961e | 2006-08-10 23:33:16 -0700 | [diff] [blame] | 1164 | spin_unlock_bh(&sch->dev->queue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | return skb->len; | 
|  | 1166 | rtattr_failure: | 
| Stephen Hemminger | 9ac961e | 2006-08-10 23:33:16 -0700 | [diff] [blame] | 1167 | spin_unlock_bh(&sch->dev->queue_lock); | 
| Arnaldo Carvalho de Melo | dc5fc57 | 2007-03-25 23:06:12 -0700 | [diff] [blame] | 1168 | nlmsg_trim(skb, b); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | return -1; | 
|  | 1170 | } | 
|  | 1171 |  | 
|  | 1172 | static int | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1173 | htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1175 | struct htb_class *cl = (struct htb_class *)arg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 |  | 
|  | 1177 | #ifdef HTB_RATECM | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1178 | cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE); | 
|  | 1179 | cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | #endif | 
|  | 1181 |  | 
|  | 1182 | if (!cl->level && cl->un.leaf.q) | 
|  | 1183 | cl->qstats.qlen = cl->un.leaf.q->q.qlen; | 
|  | 1184 | cl->xstats.tokens = cl->tokens; | 
|  | 1185 | cl->xstats.ctokens = cl->ctokens; | 
|  | 1186 |  | 
|  | 1187 | if (gnet_stats_copy_basic(d, &cl->bstats) < 0 || | 
|  | 1188 | gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || | 
|  | 1189 | gnet_stats_copy_queue(d, &cl->qstats) < 0) | 
|  | 1190 | return -1; | 
|  | 1191 |  | 
|  | 1192 | return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); | 
|  | 1193 | } | 
|  | 1194 |  | 
|  | 1195 | static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1196 | struct Qdisc **old) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1198 | struct htb_class *cl = (struct htb_class *)arg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 |  | 
|  | 1200 | if (cl && !cl->level) { | 
| Patrick McHardy | 9f9afec | 2006-11-29 17:35:18 -0800 | [diff] [blame] | 1201 | if (new == NULL && | 
|  | 1202 | (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 
| YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 1203 | cl->classid)) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1204 | == NULL) | 
|  | 1205 | return -ENOBUFS; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | sch_tree_lock(sch); | 
|  | 1207 | if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) { | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1208 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | qdisc_reset(*old); | 
|  | 1210 | } | 
|  | 1211 | sch_tree_unlock(sch); | 
|  | 1212 | return 0; | 
|  | 1213 | } | 
|  | 1214 | return -ENOENT; | 
|  | 1215 | } | 
|  | 1216 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1217 | static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1219 | struct htb_class *cl = (struct htb_class *)arg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | return (cl && !cl->level) ? cl->un.leaf.q : NULL; | 
|  | 1221 | } | 
|  | 1222 |  | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1223 | static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) | 
|  | 1224 | { | 
|  | 1225 | struct htb_class *cl = (struct htb_class *)arg; | 
|  | 1226 |  | 
|  | 1227 | if (cl->un.leaf.q->q.qlen == 0) | 
|  | 1228 | htb_deactivate(qdisc_priv(sch), cl); | 
|  | 1229 | } | 
|  | 1230 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | static unsigned long htb_get(struct Qdisc *sch, u32 classid) | 
|  | 1232 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1233 | struct htb_class *cl = htb_find(classid, sch); | 
|  | 1234 | if (cl) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | cl->refcnt++; | 
|  | 1236 | return (unsigned long)cl; | 
|  | 1237 | } | 
|  | 1238 |  | 
| Jarek Poplawski | 160d5e1 | 2006-12-08 00:26:56 -0800 | [diff] [blame] | 1239 | static inline int htb_parent_last_child(struct htb_class *cl) | 
|  | 1240 | { | 
|  | 1241 | if (!cl->parent) | 
|  | 1242 | /* the root class */ | 
|  | 1243 | return 0; | 
|  | 1244 |  | 
|  | 1245 | if (!(cl->parent->children.next == &cl->sibling && | 
|  | 1246 | cl->parent->children.prev == &cl->sibling)) | 
|  | 1247 | /* not the last child */ | 
|  | 1248 | return 0; | 
|  | 1249 |  | 
|  | 1250 | return 1; | 
|  | 1251 | } | 
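
The two-pointer test above checks that the parent's circular children list contains exactly one element, i.e. that cl is about to become the last child. A minimal sketch of that invariant on a bare doubly linked list head (list_has_only is an illustrative helper, not a kernel one):

	struct list_head {
		struct list_head *next, *prev;
	};

	/* True when the circular list at 'head' holds exactly 'entry'
	   and nothing else: both links of the head must point at it. */
	static int list_has_only(const struct list_head *head,
				 const struct list_head *entry)
	{
		return head->next == entry && head->prev == entry;
	}
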
|  | 1252 |  | 
|  | 1253 | static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q) | 
|  | 1254 | { | 
|  | 1255 | struct htb_class *parent = cl->parent; | 
|  | 1256 |  | 
|  | 1257 | BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity); | 
|  | 1258 |  | 
|  | 1259 | parent->level = 0; | 
|  | 1260 | memset(&parent->un.inner, 0, sizeof(parent->un.inner)); | 
|  | 1261 | INIT_LIST_HEAD(&parent->un.leaf.drop_list); | 
|  | 1262 | parent->un.leaf.q = new_q ? new_q : &noop_qdisc; | 
|  | 1263 | parent->un.leaf.quantum = parent->quantum; | 
|  | 1264 | parent->un.leaf.prio = parent->prio; | 
|  | 1265 | parent->tokens = parent->buffer; | 
|  | 1266 | parent->ctokens = parent->cbuffer; | 
| Patrick McHardy | 3bebcda | 2007-03-23 11:29:25 -0700 | [diff] [blame] | 1267 | parent->t_c = psched_get_time(); | 
| Jarek Poplawski | 160d5e1 | 2006-12-08 00:26:56 -0800 | [diff] [blame] | 1268 | parent->cmode = HTB_CAN_SEND; | 
|  | 1269 | } | 
|  | 1270 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1271 | static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | { | 
|  | 1273 | struct htb_sched *q = qdisc_priv(sch); | 
| Patrick McHardy | 814a175e | 2006-11-29 17:34:50 -0800 | [diff] [blame] | 1274 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | if (!cl->level) { | 
|  | 1276 | BUG_TRAP(cl->un.leaf.q); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | qdisc_destroy(cl->un.leaf.q); | 
|  | 1278 | } | 
|  | 1279 | qdisc_put_rtab(cl->rate); | 
|  | 1280 | qdisc_put_rtab(cl->ceil); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1281 |  | 
| Patrick McHardy | a48b5a6 | 2007-03-23 11:29:43 -0700 | [diff] [blame] | 1282 | tcf_destroy_chain(cl->filter_list); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1283 |  | 
|  | 1284 | while (!list_empty(&cl->children)) | 
|  | 1285 | htb_destroy_class(sch, list_entry(cl->children.next, | 
|  | 1286 | struct htb_class, sibling)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1287 |  | 
|  | 1288 | /* note: this delete may happen twice (see htb_delete) */ | 
| Stephen Hemminger | da33e3e | 2006-11-07 14:54:46 -0800 | [diff] [blame] | 1289 | hlist_del_init(&cl->hlist); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | list_del(&cl->sibling); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1291 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | if (cl->prio_activity) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1293 | htb_deactivate(q, cl); | 
|  | 1294 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1295 | if (cl->cmode != HTB_CAN_SEND) | 
| Stephen Hemminger | 3696f62 | 2006-08-10 23:36:01 -0700 | [diff] [blame] | 1296 | htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1297 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1298 | kfree(cl); | 
|  | 1299 | } | 
|  | 1300 |  | 
|  | 1301 | /* always called under BH & queue lock */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1302 | static void htb_destroy(struct Qdisc *sch) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | { | 
|  | 1304 | struct htb_sched *q = qdisc_priv(sch); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 |  | 
| Patrick McHardy | fb983d4 | 2007-03-16 01:22:39 -0700 | [diff] [blame] | 1306 | qdisc_watchdog_cancel(&q->watchdog); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 | #ifdef HTB_RATECM | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1308 | del_timer_sync(&q->rttim); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1309 | #endif | 
|  | 1310 | /* This line used to be after the htb_destroy_class call below, | 
| YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 1311 | and surprisingly it worked in 2.4. But it must precede it | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | because filters need their target class alive to be able to call | 
|  | 1313 | unbind_filter on it (without an Oops). */ | 
| Patrick McHardy | a48b5a6 | 2007-03-23 11:29:43 -0700 | [diff] [blame] | 1314 | tcf_destroy_chain(q->filter_list); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1315 |  | 
|  | 1316 | while (!list_empty(&q->root)) | 
|  | 1317 | htb_destroy_class(sch, list_entry(q->root.next, | 
|  | 1318 | struct htb_class, sibling)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 |  | 
|  | 1320 | __skb_queue_purge(&q->direct_queue); | 
|  | 1321 | } | 
|  | 1322 |  | 
|  | 1323 | static int htb_delete(struct Qdisc *sch, unsigned long arg) | 
|  | 1324 | { | 
|  | 1325 | struct htb_sched *q = qdisc_priv(sch); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1326 | struct htb_class *cl = (struct htb_class *)arg; | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1327 | unsigned int qlen; | 
| Jarek Poplawski | 160d5e1 | 2006-12-08 00:26:56 -0800 | [diff] [blame] | 1328 | struct Qdisc *new_q = NULL; | 
|  | 1329 | int last_child = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 |  | 
|  | 1331 | // TODO: why don't we allow deleting a subtree? references? does the | 
|  | 1332 | // tc subsys guarantee us that in htb_destroy it holds no class | 
|  | 1333 | // refs, so that we can remove children safely there? | 
|  | 1334 | if (!list_empty(&cl->children) || cl->filter_cnt) | 
|  | 1335 | return -EBUSY; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1336 |  | 
| Jarek Poplawski | 160d5e1 | 2006-12-08 00:26:56 -0800 | [diff] [blame] | 1337 | if (!cl->level && htb_parent_last_child(cl)) { | 
|  | 1338 | new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, | 
|  | 1339 | cl->parent->classid); | 
|  | 1340 | last_child = 1; | 
|  | 1341 | } | 
|  | 1342 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | sch_tree_lock(sch); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1344 |  | 
| Patrick McHardy | 814a175e | 2006-11-29 17:34:50 -0800 | [diff] [blame] | 1345 | if (!cl->level) { | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1346 | qlen = cl->un.leaf.q->q.qlen; | 
| Patrick McHardy | 814a175e | 2006-11-29 17:34:50 -0800 | [diff] [blame] | 1347 | qdisc_reset(cl->un.leaf.q); | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1348 | qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen); | 
| Patrick McHardy | 814a175e | 2006-11-29 17:34:50 -0800 | [diff] [blame] | 1349 | } | 
|  | 1350 |  | 
| Patrick McHardy | c38c83c | 2007-03-27 14:04:24 -0700 | [diff] [blame] | 1351 | /* delete from hash and active lists; the remainder in destroy_class */ | 
|  | 1352 | hlist_del_init(&cl->hlist); | 
|  | 1353 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | if (cl->prio_activity) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1355 | htb_deactivate(q, cl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 |  | 
| Jarek Poplawski | 160d5e1 | 2006-12-08 00:26:56 -0800 | [diff] [blame] | 1357 | if (last_child) | 
|  | 1358 | htb_parent_to_leaf(cl, new_q); | 
|  | 1359 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | if (--cl->refcnt == 0) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1361 | htb_destroy_class(sch, cl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 |  | 
|  | 1363 | sch_tree_unlock(sch); | 
|  | 1364 | return 0; | 
|  | 1365 | } | 
|  | 1366 |  | 
|  | 1367 | static void htb_put(struct Qdisc *sch, unsigned long arg) | 
|  | 1368 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1369 | struct htb_class *cl = (struct htb_class *)arg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 |  | 
|  | 1371 | if (--cl->refcnt == 0) | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1372 | htb_destroy_class(sch, cl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1373 | } | 
|  | 1374 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1375 | static int htb_change_class(struct Qdisc *sch, u32 classid, | 
|  | 1376 | u32 parentid, struct rtattr **tca, | 
|  | 1377 | unsigned long *arg) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | { | 
|  | 1379 | int err = -EINVAL; | 
|  | 1380 | struct htb_sched *q = qdisc_priv(sch); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1381 | struct htb_class *cl = (struct htb_class *)*arg, *parent; | 
|  | 1382 | struct rtattr *opt = tca[TCA_OPTIONS - 1]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1383 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; | 
|  | 1384 | struct rtattr *tb[TCA_HTB_RTAB]; | 
|  | 1385 | struct tc_htb_opt *hopt; | 
|  | 1386 |  | 
|  | 1387 | /* extract all subattrs from opt attr */ | 
|  | 1388 | if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) || | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1389 | tb[TCA_HTB_PARMS - 1] == NULL || | 
|  | 1390 | RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | goto failure; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1393 | parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch); | 
| Stephen Hemminger | 3bf7295 | 2006-08-10 23:31:08 -0700 | [diff] [blame] | 1394 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1395 | hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1397 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]); | 
|  | 1398 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]); | 
|  | 1399 | if (!rtab || !ctab) | 
|  | 1400 | goto failure; | 
|  | 1401 |  | 
|  | 1402 | if (!cl) {		/* new class */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | struct Qdisc *new_q; | 
| Stephen Hemminger | 3696f62 | 2006-08-10 23:36:01 -0700 | [diff] [blame] | 1404 | int prio; | 
|  | 1405 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1406 | /* check for valid classid */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1407 | if (!classid || TC_H_MAJ(classid ^ sch->handle) | 
|  | 1408 | || htb_find(classid, sch)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | goto failure; | 
|  | 1410 |  | 
|  | 1411 | /* check maximal depth */ | 
|  | 1412 | if (parent && parent->parent && parent->parent->level < 2) { | 
|  | 1413 | printk(KERN_ERR "htb: tree is too deep\n"); | 
|  | 1414 | goto failure; | 
|  | 1415 | } | 
|  | 1416 | err = -ENOBUFS; | 
| Panagiotis Issaris | 0da974f | 2006-07-21 14:51:30 -0700 | [diff] [blame] | 1417 | if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1418 | goto failure; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1419 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | cl->refcnt = 1; | 
|  | 1421 | INIT_LIST_HEAD(&cl->sibling); | 
| Stephen Hemminger | 0cef296 | 2006-08-10 23:35:38 -0700 | [diff] [blame] | 1422 | INIT_HLIST_NODE(&cl->hlist); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | INIT_LIST_HEAD(&cl->children); | 
|  | 1424 | INIT_LIST_HEAD(&cl->un.leaf.drop_list); | 
| Stephen Hemminger | 3696f62 | 2006-08-10 23:36:01 -0700 | [diff] [blame] | 1425 | RB_CLEAR_NODE(&cl->pq_node); | 
|  | 1426 |  | 
|  | 1427 | for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) | 
|  | 1428 | RB_CLEAR_NODE(&cl->node[prio]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 |  | 
|  | 1430 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) | 
|  | 1431 | and so can't be used inside sch_tree_lock | 
|  | 1432 | -- thanks to Karlis Peisenieks */ | 
| Patrick McHardy | 9f9afec | 2006-11-29 17:35:18 -0800 | [diff] [blame] | 1433 | new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | sch_tree_lock(sch); | 
|  | 1435 | if (parent && !parent->level) { | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1436 | unsigned int qlen = parent->un.leaf.q->q.qlen; | 
|  | 1437 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 | /* turn parent into inner node */ | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1439 | qdisc_reset(parent->un.leaf.q); | 
|  | 1440 | qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1441 | qdisc_destroy(parent->un.leaf.q); | 
|  | 1442 | if (parent->prio_activity) | 
|  | 1443 | htb_deactivate(q, parent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1444 |  | 
|  | 1445 | /* remove from evt list because of level change */ | 
|  | 1446 | if (parent->cmode != HTB_CAN_SEND) { | 
| Stephen Hemminger | 3696f62 | 2006-08-10 23:36:01 -0700 | [diff] [blame] | 1447 | htb_safe_rb_erase(&parent->pq_node, q->wait_pq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | parent->cmode = HTB_CAN_SEND; | 
|  | 1449 | } | 
|  | 1450 | parent->level = (parent->parent ? parent->parent->level | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1451 | : TC_HTB_MAXDEPTH) - 1; | 
|  | 1452 | memset(&parent->un.inner, 0, sizeof(parent->un.inner)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1453 | } | 
|  | 1454 | /* a leaf (like us) needs an elementary qdisc */ | 
|  | 1455 | cl->un.leaf.q = new_q ? new_q : &noop_qdisc; | 
|  | 1456 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1457 | cl->classid = classid; | 
|  | 1458 | cl->parent = parent; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1459 |  | 
|  | 1460 | /* set class to be in HTB_CAN_SEND state */ | 
|  | 1461 | cl->tokens = hopt->buffer; | 
|  | 1462 | cl->ctokens = hopt->cbuffer; | 
| Patrick McHardy | 00c04af | 2007-03-16 01:23:02 -0700 | [diff] [blame] | 1463 | cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */ | 
| Patrick McHardy | 3bebcda | 2007-03-23 11:29:25 -0700 | [diff] [blame] | 1464 | cl->t_c = psched_get_time(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1465 | cl->cmode = HTB_CAN_SEND; | 
|  | 1466 |  | 
|  | 1467 | /* attach to the hash list and parent's family */ | 
| Stephen Hemminger | 0cef296 | 2006-08-10 23:35:38 -0700 | [diff] [blame] | 1468 | hlist_add_head(&cl->hlist, q->hash + htb_hash(classid)); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1469 | list_add_tail(&cl->sibling, | 
|  | 1470 | parent ? &parent->children : &q->root); | 
|  | 1471 | } else | 
|  | 1472 | sch_tree_lock(sch); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1473 |  | 
|  | 1474 | /* it used to be a nasty bug here, we have to check that node | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1475 | is really leaf before changing cl->un.leaf ! */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | if (!cl->level) { | 
|  | 1477 | cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum; | 
|  | 1478 | if (!hopt->quantum && cl->un.leaf.quantum < 1000) { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1479 | printk(KERN_WARNING | 
|  | 1480 | "HTB: quantum of class %X is small. Consider r2q change.\n", | 
|  | 1481 | cl->classid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | cl->un.leaf.quantum = 1000; | 
|  | 1483 | } | 
|  | 1484 | if (!hopt->quantum && cl->un.leaf.quantum > 200000) { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1485 | printk(KERN_WARNING | 
|  | 1486 | "HTB: quantum of class %X is big. Consider r2q change.\n", | 
|  | 1487 | cl->classid); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | cl->un.leaf.quantum = 200000; | 
|  | 1489 | } | 
|  | 1490 | if (hopt->quantum) | 
|  | 1491 | cl->un.leaf.quantum = hopt->quantum; | 
|  | 1492 | if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO) | 
|  | 1493 | cl->un.leaf.prio = TC_HTB_NUMPRIO - 1; | 
| Jarek Poplawski | 160d5e1 | 2006-12-08 00:26:56 -0800 | [diff] [blame] | 1494 |  | 
|  | 1495 | /* backup for htb_parent_to_leaf */ | 
|  | 1496 | cl->quantum = cl->un.leaf.quantum; | 
|  | 1497 | cl->prio = cl->un.leaf.prio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1498 | } | 
|  | 1499 |  | 
|  | 1500 | cl->buffer = hopt->buffer; | 
|  | 1501 | cl->cbuffer = hopt->cbuffer; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1502 | if (cl->rate) | 
|  | 1503 | qdisc_put_rtab(cl->rate); | 
|  | 1504 | cl->rate = rtab; | 
|  | 1505 | if (cl->ceil) | 
|  | 1506 | qdisc_put_rtab(cl->ceil); | 
|  | 1507 | cl->ceil = ctab; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | sch_tree_unlock(sch); | 
|  | 1509 |  | 
|  | 1510 | *arg = (unsigned long)cl; | 
|  | 1511 | return 0; | 
|  | 1512 |  | 
|  | 1513 | failure: | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1514 | if (rtab) | 
|  | 1515 | qdisc_put_rtab(rtab); | 
|  | 1516 | if (ctab) | 
|  | 1517 | qdisc_put_rtab(ctab); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | return err; | 
|  | 1519 | } | 
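
When the user supplies no quantum, htb_change_class() derives one from the class rate: quantum = rate in bytes per second divided by rate2quantum (tc's r2q), clamped to the 1000..200000 byte window warned about above. For example, a 1 Mbit/s class (125000 bytes/s) with the common r2q of 10 would get a 12500-byte quantum. A condensed sketch of that policy (htb_pick_quantum is an illustrative name):

	/* Derive the DRR quantum the way htb_change_class() does: an
	   explicit user value wins unclamped; otherwise scale the rate
	   down by r2q and clamp to a sane byte range. */
	static unsigned int htb_pick_quantum(unsigned int rate_Bps, int r2q,
					     unsigned int user_quantum)
	{
		unsigned int q;

		if (user_quantum)
			return user_quantum;
		q = rate_Bps / r2q;
		if (q < 1000)
			q = 1000;	/* too small: degenerate rounds */
		if (q > 200000)
			q = 200000;	/* too big: one class hogs a round */
		return q;
	}
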
|  | 1520 |  | 
|  | 1521 | static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) | 
|  | 1522 | { | 
|  | 1523 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 1524 | struct htb_class *cl = (struct htb_class *)arg; | 
|  | 1525 | struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list; | 
| Stephen Hemminger | 3bf7295 | 2006-08-10 23:31:08 -0700 | [diff] [blame] | 1526 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | return fl; | 
|  | 1528 | } | 
|  | 1529 |  | 
|  | 1530 | static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1531 | u32 classid) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 | { | 
|  | 1533 | struct htb_sched *q = qdisc_priv(sch); | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1534 | struct htb_class *cl = htb_find(classid, sch); | 
| Stephen Hemminger | 3bf7295 | 2006-08-10 23:31:08 -0700 | [diff] [blame] | 1535 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | /*if (cl && !cl->level) return 0; | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1537 | The line above used to be there to prevent attaching filters to | 
|  | 1538 | leaves. But at least the tc_index filter uses this just to get the | 
|  | 1539 | class for other reasons, so we have to allow for it. | 
|  | 1540 | ---- | 
|  | 1541 | 19.6.2002 As Werner explained, it is ok - bind filter is just | 
|  | 1542 | another way to "lock" the class - unlike "get", this lock can | 
|  | 1543 | be broken by the class during destroy, IIUC. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1544 | */ | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1545 | if (cl) | 
|  | 1546 | cl->filter_cnt++; | 
|  | 1547 | else | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 | q->filter_cnt++; | 
|  | 1549 | return (unsigned long)cl; | 
|  | 1550 | } | 
|  | 1551 |  | 
|  | 1552 | static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) | 
|  | 1553 | { | 
|  | 1554 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 1555 | struct htb_class *cl = (struct htb_class *)arg; | 
| Stephen Hemminger | 3bf7295 | 2006-08-10 23:31:08 -0700 | [diff] [blame] | 1556 |  | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1557 | if (cl) | 
|  | 1558 | cl->filter_cnt--; | 
|  | 1559 | else | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 | q->filter_cnt--; | 
|  | 1561 | } | 
|  | 1562 |  | 
|  | 1563 | static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) | 
|  | 1564 | { | 
|  | 1565 | struct htb_sched *q = qdisc_priv(sch); | 
|  | 1566 | int i; | 
|  | 1567 |  | 
|  | 1568 | if (arg->stop) | 
|  | 1569 | return; | 
|  | 1570 |  | 
|  | 1571 | for (i = 0; i < HTB_HSIZE; i++) { | 
| Stephen Hemminger | 0cef296 | 2006-08-10 23:35:38 -0700 | [diff] [blame] | 1572 | struct hlist_node *p; | 
|  | 1573 | struct htb_class *cl; | 
|  | 1574 |  | 
|  | 1575 | hlist_for_each_entry(cl, p, q->hash + i, hlist) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1576 | if (arg->count < arg->skip) { | 
|  | 1577 | arg->count++; | 
|  | 1578 | continue; | 
|  | 1579 | } | 
|  | 1580 | if (arg->fn(sch, (unsigned long)cl, arg) < 0) { | 
|  | 1581 | arg->stop = 1; | 
|  | 1582 | return; | 
|  | 1583 | } | 
|  | 1584 | arg->count++; | 
|  | 1585 | } | 
|  | 1586 | } | 
|  | 1587 | } | 
|  | 1588 |  | 
|  | 1589 | static struct Qdisc_class_ops htb_class_ops = { | 
|  | 1590 | .graft		=	htb_graft, | 
|  | 1591 | .leaf		=	htb_leaf, | 
| Patrick McHardy | 256d61b | 2006-11-29 17:37:05 -0800 | [diff] [blame] | 1592 | .qlen_notify	=	htb_qlen_notify, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | .get		=	htb_get, | 
|  | 1594 | .put		=	htb_put, | 
|  | 1595 | .change		=	htb_change_class, | 
|  | 1596 | .delete		=	htb_delete, | 
|  | 1597 | .walk		=	htb_walk, | 
|  | 1598 | .tcf_chain	=	htb_find_tcf, | 
|  | 1599 | .bind_tcf	=	htb_bind_filter, | 
|  | 1600 | .unbind_tcf	=	htb_unbind_filter, | 
|  | 1601 | .dump		=	htb_dump_class, | 
|  | 1602 | .dump_stats	=	htb_dump_class_stats, | 
|  | 1603 | }; | 
|  | 1604 |  | 
|  | 1605 | static struct Qdisc_ops htb_qdisc_ops = { | 
|  | 1606 | .next		=	NULL, | 
|  | 1607 | .cl_ops		=	&htb_class_ops, | 
|  | 1608 | .id		=	"htb", | 
|  | 1609 | .priv_size	=	sizeof(struct htb_sched), | 
|  | 1610 | .enqueue	=	htb_enqueue, | 
|  | 1611 | .dequeue	=	htb_dequeue, | 
|  | 1612 | .requeue	=	htb_requeue, | 
|  | 1613 | .drop		=	htb_drop, | 
|  | 1614 | .init		=	htb_init, | 
|  | 1615 | .reset		=	htb_reset, | 
|  | 1616 | .destroy	=	htb_destroy, | 
|  | 1617 | .change		=	NULL /* htb_change */, | 
|  | 1618 | .dump		=	htb_dump, | 
|  | 1619 | .owner		=	THIS_MODULE, | 
|  | 1620 | }; | 
|  | 1621 |  | 
|  | 1622 | static int __init htb_module_init(void) | 
|  | 1623 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1624 | return register_qdisc(&htb_qdisc_ops); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1625 | } | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1626 | static void __exit htb_module_exit(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | { | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1628 | unregister_qdisc(&htb_qdisc_ops); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | } | 
| Stephen Hemminger | 8799046 | 2006-08-10 23:35:16 -0700 | [diff] [blame] | 1630 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | module_init(htb_module_init) | 
|  | 1632 | module_exit(htb_module_exit) | 
|  | 1633 | MODULE_LICENSE("GPL"); |