/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications to data that participate in scheduling
   must additionally be protected by the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top-level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock),
     and this lock is used only in process context.
   - updates to the tree are made under the rtnl semaphore or
     from softirq context (the __qdisc_destroy rcu-callback),
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
	write_lock_bh(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock_bh(&dev->queue_lock);
	write_unlock_bh(&qdisc_tree_lock);
}
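
/* A minimal usage sketch of the helpers above (editorial illustration,
 * not part of the original file; example_graft_qdisc is hypothetical):
 * configuration paths take both locks in the documented order before
 * touching the tree.
 */
static void example_graft_qdisc(struct net_device *dev, struct Qdisc *new)
{
	qdisc_lock_tree(dev);		/* qdisc_tree_lock, then dev->queue_lock */
	dev->qdisc_sleeping = new;	/* tree modification is now safe */
	qdisc_unlock_tree(dev);		/* released in reverse order */
}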

/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   dev->xmit_lock serializes accesses to the device driver.

   dev->queue_lock and dev->xmit_lock are mutually exclusive:
   if one is held, the other must be free.
 */

/* Kick the device.
   Note that this procedure can be called by a watchdog timer, so
   we do not check the dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
	    <0  - queue is not empty; a packet was sent (or dropped),
	          so the caller should try again.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/

int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if ((skb = q->dequeue(q)) != NULL) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);
		/*
		 * When the driver has LLTX set, it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do a trylock, like here, too; in case
		 * of lock contention it should return NETDEV_TX_LOCKED
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!spin_trylock(&dev->xmit_lock)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking the xmit owner and drop the
				   packet when a dead loop is detected.
				*/
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					return -1;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
			/* Remember that the driver is grabbed by us. */
			dev->xmit_lock_owner = smp_processor_id();
		}

		{
			/* And release the queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				ret = dev->hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						dev->xmit_lock_owner = -1;
						spin_unlock(&dev->xmit_lock);
					}
					spin_lock(&dev->queue_lock);
					return -1;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				dev->xmit_lock_owner = -1;
				spin_unlock(&dev->xmit_lock);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* The device kicked us out :(
		   This is possible in the following cases:

		   0. the driver is locked
		   1. fastroute is enabled
		   2. the device cannot determine the busy state
		      before the start of transmission (e.g. dialout)
		   3. the device is buggy (ppp)
		 */

requeue:
		q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 1;
	}
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
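
/* For context: how callers consume the return value above. In kernels
 * of this era the loop lives in include/net/pkt_sched.h as qdisc_run();
 * the sketch below is an illustrative approximation, not part of this
 * file. A negative return means "made progress, call me again".
 */
static inline void example_qdisc_run(struct net_device *dev)
{
	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
		/* NOTHING */;
}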

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	spin_lock(&dev->xmit_lock);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    (jiffies - dev->trans_start) > dev->watchdog_timeo) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
				dev_hold(dev);
		}
	}
	spin_unlock(&dev->xmit_lock);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	__netdev_watchdog_up(dev);
	spin_unlock_bh(&dev->xmit_lock);
}

static void dev_watchdog_down(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	if (del_timer(&dev->watchdog_timer))
		__dev_put(dev);
	spin_unlock_bh(&dev->xmit_lock);
}
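
/* Sketch of the driver-side contract assumed by the watchdog above
 * (illustrative; mydrv_tx_timeout and mydrv_setup are hypothetical).
 * A driver opts in by setting tx_timeout; watchdog_timeo defaults to
 * 5*HZ in __netdev_watchdog_up() if left unset.
 */
static void mydrv_tx_timeout(struct net_device *dev)
{
	/* Reset the hardware here, then let transmissions resume. */
	netif_wake_queue(dev);
}

static void mydrv_setup(struct net_device *dev)
{
	dev->tx_timeout = mydrv_tx_timeout;	/* enables the watchdog */
	dev->watchdog_timeo = 2 * HZ;		/* optional override */
}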

void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}
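
/* Illustrative driver usage (mydrv_link_change is hypothetical): PHY
 * link-change handlers report state through these helpers, which fire
 * linkwatch events and, on carrier-on, re-arm the transmit watchdog.
 */
static void mydrv_link_change(struct net_device *dev, int link_up)
{
	if (link_up)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}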

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};
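
/* Editorial note: noqueue_qdisc deliberately leaves .enqueue NULL.
 * dev_queue_xmit() treats a NULL q->enqueue as "no queue at all" and
 * hands the skb straight to the driver, which is how virtual devices
 * (loopback, tunnels) bypass scheduling entirely.
 */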

static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */
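
/* Worked example of the mapping above (editorial): skb->priority is
 * masked to 0..TC_PRIO_MAX before indexing prio2band, so e.g.
 *
 *   prio2band[TC_PRIO_INTERACTIVE] == 0   (band 0, dequeued first)
 *   prio2band[TC_PRIO_BESTEFFORT]  == 1   (band 1)
 *   prio2band[TC_PRIO_BULK]        == 2   (band 2, dequeued last)
 *
 * using the TC_PRIO_* constants from <linux/pkt_sched.h>.
 */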

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	memset(p, 0, size);
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	sch->stats_lock = &dev->queue_lock;
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);	/* err is already negative (-ENOBUFS) */
}
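
/* Worked example of the alignment arithmetic above (editorial,
 * assuming QDISC_ALIGNTO is 32, as the comment implies): if kmalloc()
 * returns p ending in ...0x08, QDISC_ALIGN rounds sch up to ...0x20,
 * and sch->padded records the 0x18-byte gap so the original pointer
 * can be recovered later via
 *
 *	kfree((char *) qdisc - qdisc->padded);
 *
 * as __qdisc_destroy() below does.
 */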

struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
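
/* Typical use (illustrative): classful qdiscs attach a default child
 * this way, e.g.
 *
 *	child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
 *	if (child == NULL)
 *		return -ENOBUFS;
 *
 * Note that this helper reports failure as NULL, not as an ERR_PTR,
 * unlike qdisc_alloc() above. pfifo_qdisc_ops lives in sch_fifo.c.
 */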

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* This is the RCU callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	struct Qdisc_ops  *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	write_lock(&qdisc_tree_lock);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);
	write_unlock(&qdisc_tree_lock);
	module_put(ops->owner);

	dev_put(qdisc->dev);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct list_head cql = LIST_HEAD_INIT(cql);
	struct Qdisc *cq, *q, *n;

	if (qdisc->flags & TCQ_F_BUILTIN ||
		!atomic_dec_and_test(&qdisc->refcnt))
		return;

	if (!list_empty(&qdisc->list)) {
		if (qdisc->ops->cl_ops == NULL)
			list_del(&qdisc->list);
		else
			list_move(&qdisc->list, &cql);
	}

	/* unlink inner qdiscs from dev->qdisc_list immediately */
	list_for_each_entry(cq, &cql, list)
		list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
			if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
				if (q->ops->cl_ops == NULL)
					list_del_init(&q->list);
				else
					list_move_tail(&q->list, &cql);
			}
	list_for_each_entry_safe(cq, n, &cql, list)
		list_del_init(&cq->list);

	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing, and noqueue_qdisc for
	   virtual interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			write_lock_bh(&qdisc_tree_lock);
			list_add_tail(&qdisc->list, &dev->qdisc_list);
			write_unlock_bh(&qdisc_tree_lock);
		} else {
			qdisc = &noqueue_qdisc;
		}
		write_lock_bh(&qdisc_tree_lock);
		dev->qdisc_sleeping = qdisc;
		write_unlock_bh(&qdisc_tree_lock);
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until the next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

	/* Wait for any outstanding softirq scheduling of this device. */
	while (test_bit(__LINK_STATE_SCHED, &dev->state))
		yield();

	spin_unlock_wait(&dev->xmit_lock);
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);