| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  *			Simple traffic shaper for Linux NET3. | 
 | 3 |  * | 
 | 4 |  *	(c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved. | 
 | 5 |  *				http://www.redhat.com | 
 | 6 |  * | 
 | 7 |  *	This program is free software; you can redistribute it and/or | 
 | 8 |  *	modify it under the terms of the GNU General Public License | 
 | 9 |  *	as published by the Free Software Foundation; either version | 
 | 10 |  *	2 of the License, or (at your option) any later version. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 |  * | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 12 |  *	Neither Alan Cox nor CymruNet Ltd. admit liability nor provide | 
 | 13 |  *	warranty for any of this software. This material is provided | 
 | 14 |  *	"AS-IS" and at no charge. | 
 | 15 |  * | 
 | 16 |  * | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 |  *	Algorithm: | 
 | 18 |  * | 
 | 19 |  *	Queue Frame: | 
 | 20 |  *		Compute time length of frame at regulated speed | 
 | 21 |  *		Add frame to queue at appropriate point | 
 | 22 |  *		Adjust time length computation for followup frames | 
 | 23 |  *		Any frame that falls outside of its boundaries is freed | 
 | 24 |  * | 
 | 25 |  *	We work to the following constants | 
 | 26 |  * | 
 | 27 |  *		SHAPER_QLEN	Maximum queued frames | 
 | 28 |  *		SHAPER_LATENCY	Bounding latency on a frame. Leaving this latency | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 29 |  *				window drops the frame. This stops us queueing | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 |  *				frames for a long time and confusing a remote | 
 | 31 |  *				host. | 
 | 32 |  *		SHAPER_MAXSLIP	Maximum time a priority frame may jump forward. | 
 | 33 |  *				That bounds the penalty we will inflict on low | 
 | 34 |  *				priority traffic. | 
 | 35 |  *		SHAPER_BURST	Time range we call "now" in order to reduce | 
 | 36 |  *				system load. The more we make this the burstier | 
 | 37 |  *				the behaviour, the better local performance you | 
 | 38 |  *				get through packet clustering on routers and the | 
 | 39 |  *				worse the remote end gets to judge rtts. | 
 | 40 |  * | 
 | 41 |  *	This is designed to handle lower speed links ( < 200K/second or so). We | 
 | 42 |  *	run off a 100-150Hz base clock typically. This gives us a resolution at | 
 | 43 |  *	200Kbit/second of about 2Kbit or 256 bytes. Above that our timer | 
 | 44 |  *	resolution may start to cause much more burstiness in the traffic. We | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 45 |  *	could avoid a lot of that by calling kick_shaper() at the end of the | 
 | 46 |  *	tied device transmissions. If you run above about 100K second you | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 47 |  *	may need to tune the supposed speed rate for the right values. | 
 | 48 |  * | 
 | 49 |  *	BUGS: | 
 | 50 |  *		Downing the interface under the shaper before the shaper | 
 | 51 |  *		will render your machine defunct. Don't for now shape over | 
 | 52 |  *		PPP or SLIP therefore! | 
 | 53 |  *		This will be fixed in BETA4 | 
 | 54 |  * | 
 | 55 |  * Update History : | 
 | 56 |  * | 
 | 57 |  *              bh_atomic() SMP races fixes and rewritten the locking code to | 
 | 58 |  *              be SMP safe and irq-mask friendly. | 
 | 59 |  *              NOTE: we can't use start_bh_atomic() in kick_shaper() | 
 | 60 |  *              because it's going to be recalled from an irq handler, | 
 | 61 |  *              and synchronize_bh() is a nono if called from irq context. | 
 | 62 |  *						1999  Andrea Arcangeli | 
 | 63 |  * | 
 *              Device statistics (tx_packets, tx_bytes,
 *              tx_drops: queue_over_time and collisions: max_queue_exceeded)
 | 66 |  *                               1999/06/18 Jordi Murgo <savage@apostols.org> | 
 | 67 |  * | 
 | 68 |  *		Use skb->cb for private data. | 
 | 69 |  *				 2000/03 Andi Kleen | 
 | 70 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 71 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 72 | #include <linux/module.h> | 
 | 73 | #include <linux/kernel.h> | 
 | 74 | #include <linux/fcntl.h> | 
 | 75 | #include <linux/mm.h> | 
 | 76 | #include <linux/slab.h> | 
 | 77 | #include <linux/string.h> | 
 | 78 | #include <linux/errno.h> | 
 | 79 | #include <linux/netdevice.h> | 
 | 80 | #include <linux/etherdevice.h> | 
 | 81 | #include <linux/skbuff.h> | 
 | 82 | #include <linux/if_arp.h> | 
 | 83 | #include <linux/init.h> | 
 | 84 | #include <linux/if_shaper.h> | 
| Marcelo Feitoza Parisi | ff5688a | 2006-01-09 18:37:15 -0800 | [diff] [blame] | 85 | #include <linux/jiffies.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 86 |  | 
 | 87 | #include <net/dst.h> | 
 | 88 | #include <net/arp.h> | 
 | 89 |  | 
/*
 *	Per-packet shaping state, carried in the skb's control buffer
 *	(skb->cb) while the frame sits on the shaper's send queue.
 */
struct shaper_cb {
	unsigned long	shapeclock;		/* Time it should go out (jiffies) */
	unsigned long	shapestamp;		/* Stamp for shaper: jiffies at enqueue */
	__u32		shapelatency;		/* Latency on frame */
	__u32		shapelen;		/* Frame length in clocks (ticks on the wire) */
	__u16		shapepend;		/* Pending flag, cleared when the frame is fired */
};
/* Access the shaping state hidden in an skb's control buffer */
#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))
 | 98 |  | 
static int sh_debug;		/* Debug flag: enables verbose printk tracing */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

/*
 * Forward declaration: shaper_kick() is used by the transmit path before
 * its definition. Every caller holds shaper->lock around the call.
 */
static void shaper_kick(struct shaper *sh);
 | 104 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 105 | /* | 
 | 106 |  *	Compute clocks on a buffer | 
 | 107 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 108 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 109 | static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb) | 
 | 110 | { | 
 | 111 |  	int t=skb->len/shaper->bytespertick; | 
 | 112 |  	return t; | 
 | 113 | } | 
 | 114 |  | 
 | 115 | /* | 
 | 116 |  *	Set the speed of a shaper. We compute this in bytes per tick since | 
 | 117 |  *	thats how the machine wants to run. Quoted input is in bits per second | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 118 |  *	as is traditional (note not BAUD). We assume 8 bit bytes. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 119 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 120 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 121 | static void shaper_setspeed(struct shaper *shaper, int bitspersec) | 
 | 122 | { | 
 | 123 | 	shaper->bitspersec=bitspersec; | 
 | 124 | 	shaper->bytespertick=(bitspersec/HZ)/8; | 
 | 125 | 	if(!shaper->bytespertick) | 
 | 126 | 		shaper->bytespertick++; | 
 | 127 | } | 
 | 128 |  | 
 | 129 | /* | 
 | 130 |  *	Throw a frame at a shaper. | 
 | 131 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 132 |  | 
| Christoph Hellwig | b597ef4 | 2005-06-02 16:36:00 -0700 | [diff] [blame] | 133 |  | 
/*
 * Queue a frame on the shaper device. The frame is timestamped with the
 * jiffy at which it should be released onto the real device; frames that
 * would exceed SHAPER_LATENCY in the queue are dropped instead.
 * Always returns 0 (frame consumed one way or another).
 */
static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;

	spin_lock(&shaper->lock);
	ptr = shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency = 0;
	/* Start from the link "recovery" time — when the last released
	 * frame will have finished transmitting — but never in the past. */
	SHAPERCB(skb)->shapeclock = shaper->recovery;
	if (time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock = jiffies;
	skb->priority = 0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp = jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen = shaper_clocks(shaper, skb);

	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for (tmp = skb_peek(&shaper->sendq); tmp != NULL &&
			tmp != (struct sk_buff *)&shaper->sendq; tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;
		/*
		 *	Queue over time. Spill packet.
		 */
		if (time_after(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_LATENCY)) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}

	if (sh_debug)
		printk("Frame queued.\n");
	/* Hard bound on queue length: spill the oldest frame and account
	 * it under collisions (see changelog: "max_queue_exceeded"). */
	if (skb_queue_len(&shaper->sendq) > SHAPER_QLEN)
	{
		ptr = skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	/* Release anything already due; lock is held as shaper_kick requires */
	shaper_kick(shaper);
	spin_unlock(&shaper->lock);
	return 0;
}
 | 190 |  | 
 | 191 | /* | 
 | 192 |  *	Transmit from a shaper | 
 | 193 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 194 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 195 | static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb) | 
 | 196 | { | 
 | 197 | 	struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC); | 
 | 198 | 	if(sh_debug) | 
 | 199 | 		printk("Kick frame on %p\n",newskb); | 
 | 200 | 	if(newskb) | 
 | 201 | 	{ | 
 | 202 | 		newskb->dev=shaper->dev; | 
 | 203 | 		newskb->priority=2; | 
 | 204 | 		if(sh_debug) | 
 | 205 | 			printk("Kick new frame to %s, %d\n", | 
 | 206 | 				shaper->dev->name,newskb->priority); | 
 | 207 | 		dev_queue_xmit(newskb); | 
 | 208 |  | 
 | 209 |                 shaper->stats.tx_bytes += skb->len; | 
 | 210 | 		shaper->stats.tx_packets++; | 
 | 211 |  | 
 | 212 |                 if(sh_debug) | 
 | 213 | 			printk("Kicked new frame out.\n"); | 
 | 214 | 		dev_kfree_skb(skb); | 
 | 215 | 	} | 
 | 216 | } | 
 | 217 |  | 
 | 218 | /* | 
 | 219 |  *	Timer handler for shaping clock | 
 | 220 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 221 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 222 | static void shaper_timer(unsigned long data) | 
 | 223 | { | 
| Christoph Hellwig | b597ef4 | 2005-06-02 16:36:00 -0700 | [diff] [blame] | 224 | 	struct shaper *shaper = (struct shaper *)data; | 
 | 225 |  | 
| Christoph Hellwig | bc971de | 2005-07-05 15:03:46 -0700 | [diff] [blame] | 226 | 	spin_lock(&shaper->lock); | 
 | 227 | 	shaper_kick(shaper); | 
 | 228 | 	spin_unlock(&shaper->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 229 | } | 
 | 230 |  | 
 | 231 | /* | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 232 |  *	Kick a shaper queue and try and do something sensible with the | 
 | 233 |  *	queue. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 234 |  */ | 
 | 235 |  | 
/*
 * Release every queued frame whose release time has arrived (within
 * SHAPER_BURST), then re-arm the timer for the next pending frame.
 * Caller must hold shaper->lock (all callers do).
 */
static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */

	while ((skb = skb_peek(&shaper->sendq)) != NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if (sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if (time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb, &shaper->sendq);
			/* Track when the link becomes free again; this seeds
			 * shapeclock for frames queued in shaper_start_xmit. */
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend = 0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;	/* Queue is time-ordered: nothing later is due */
	}

	/*
	 *	Next kick: skb is the first frame not yet due (NULL if the
	 *	queue drained completely).
	 */

	if (skb != NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
 | 282 |  | 
 | 283 |  | 
 | 284 | /* | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 285 |  *	Bring the interface up. We just disallow this until a | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 286 |  *	bind. | 
 | 287 |  */ | 
 | 288 |  | 
 | 289 | static int shaper_open(struct net_device *dev) | 
 | 290 | { | 
 | 291 | 	struct shaper *shaper=dev->priv; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 292 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 293 | 	/* | 
 | 294 | 	 *	Can't open until attached. | 
 | 295 | 	 *	Also can't open until speed is set, or we'll get | 
 | 296 | 	 *	a division by zero. | 
 | 297 | 	 */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 298 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 299 | 	if(shaper->dev==NULL) | 
 | 300 | 		return -ENODEV; | 
 | 301 | 	if(shaper->bitspersec==0) | 
 | 302 | 		return -EINVAL; | 
 | 303 | 	return 0; | 
 | 304 | } | 
 | 305 |  | 
 | 306 | /* | 
 | 307 |  *	Closing a shaper flushes the queues. | 
 | 308 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 309 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 310 | static int shaper_close(struct net_device *dev) | 
 | 311 | { | 
 | 312 | 	struct shaper *shaper=dev->priv; | 
| Christoph Hellwig | bc971de | 2005-07-05 15:03:46 -0700 | [diff] [blame] | 313 | 	struct sk_buff *skb; | 
 | 314 |  | 
 | 315 | 	while ((skb = skb_dequeue(&shaper->sendq)) != NULL) | 
 | 316 | 		dev_kfree_skb(skb); | 
 | 317 |  | 
 | 318 | 	spin_lock_bh(&shaper->lock); | 
 | 319 | 	shaper_kick(shaper); | 
 | 320 | 	spin_unlock_bh(&shaper->lock); | 
 | 321 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 322 | 	del_timer_sync(&shaper->timer); | 
 | 323 | 	return 0; | 
 | 324 | } | 
 | 325 |  | 
 | 326 | /* | 
 | 327 |  *	Revectored calls. We alter the parameters and call the functions | 
 | 328 |  *	for our attached device. This enables us to bandwidth allocate after | 
 | 329 |  *	ARP and other resolutions and not before. | 
 | 330 |  */ | 
 | 331 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 332 | static struct net_device_stats *shaper_get_stats(struct net_device *dev) | 
 | 333 | { | 
 | 334 |      	struct shaper *sh=dev->priv; | 
 | 335 | 	return &sh->stats; | 
 | 336 | } | 
 | 337 |  | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 338 | static int shaper_header(struct sk_buff *skb, struct net_device *dev, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 339 | 	unsigned short type, void *daddr, void *saddr, unsigned len) | 
 | 340 | { | 
 | 341 | 	struct shaper *sh=dev->priv; | 
 | 342 | 	int v; | 
 | 343 | 	if(sh_debug) | 
 | 344 | 		printk("Shaper header\n"); | 
 | 345 | 	skb->dev=sh->dev; | 
 | 346 | 	v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len); | 
 | 347 | 	skb->dev=dev; | 
 | 348 | 	return v; | 
 | 349 | } | 
 | 350 |  | 
 | 351 | static int shaper_rebuild_header(struct sk_buff *skb) | 
 | 352 | { | 
 | 353 | 	struct shaper *sh=skb->dev->priv; | 
 | 354 | 	struct net_device *dev=skb->dev; | 
 | 355 | 	int v; | 
 | 356 | 	if(sh_debug) | 
 | 357 | 		printk("Shaper rebuild header\n"); | 
 | 358 | 	skb->dev=sh->dev; | 
 | 359 | 	v=sh->rebuild_header(skb); | 
 | 360 | 	skb->dev=dev; | 
 | 361 | 	return v; | 
 | 362 | } | 
 | 363 |  | 
/*
 * Header-cache passthrough hooks. Compiled out (#if 0); shaper_attach()
 * and shaper_setup() force hard_header_cache/header_cache_update to NULL
 * instead. Kept for reference only.
 */
#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh = neigh->dev->priv;
	struct net_device *tmp;
	int ret;
	if (sh_debug)
		printk("Shaper header cache bind\n");
	/* Same device-swap trick as shaper_header()/shaper_rebuild_header() */
	tmp = neigh->dev;
	neigh->dev = sh->dev;
	ret = sh->hard_header_cache(neigh, hh);
	neigh->dev = tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
	unsigned char *haddr)
{
	struct shaper *sh = dev->priv;
	if (sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif
 | 388 |  | 
 | 389 | #ifdef CONFIG_INET | 
 | 390 |  | 
 | 391 | static int shaper_neigh_setup(struct neighbour *n) | 
 | 392 | { | 
 | 393 | #ifdef CONFIG_INET | 
 | 394 | 	if (n->nud_state == NUD_NONE) { | 
 | 395 | 		n->ops = &arp_broken_ops; | 
 | 396 | 		n->output = n->ops->output; | 
 | 397 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 398 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 399 | 	return 0; | 
 | 400 | } | 
 | 401 |  | 
 | 402 | static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) | 
 | 403 | { | 
 | 404 | #ifdef CONFIG_INET | 
 | 405 | 	if (p->tbl->family == AF_INET) { | 
 | 406 | 		p->neigh_setup = shaper_neigh_setup; | 
 | 407 | 		p->ucast_probes = 0; | 
 | 408 | 		p->mcast_probes = 0; | 
 | 409 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 410 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 411 | 	return 0; | 
 | 412 | } | 
 | 413 |  | 
 | 414 | #else /* !(CONFIG_INET) */ | 
 | 415 |  | 
 | 416 | static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) | 
 | 417 | { | 
 | 418 | 	return 0; | 
 | 419 | } | 
 | 420 |  | 
 | 421 | #endif | 
 | 422 |  | 
/*
 * Bind the shaper (shdev, whose private data is sh) to a real device.
 * Saves the target's methods in the shaper struct and points the shaper
 * device's own ops at the revectoring wrappers above, then copies the
 * target's link-layer parameters so the shaper looks like the same kind
 * of interface. Speed is reset; shaper_open() refuses to run until
 * shaper_setspeed() is called. Always returns 0.
 */
static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit = dev->hard_start_xmit;
	sh->get_stats = dev->get_stats;
	if (dev->hard_header)
	{
		sh->hard_header = dev->hard_header;
		shdev->hard_header = shaper_header;
	}
	else
		shdev->hard_header = NULL;

	if (dev->rebuild_header)
	{
		sh->rebuild_header	= dev->rebuild_header;
		shdev->rebuild_header	= shaper_rebuild_header;
	}
	else
		shdev->rebuild_header	= NULL;

#if 0
	if (dev->hard_header_cache)
	{
		sh->hard_header_cache	= dev->hard_header_cache;
		shdev->hard_header_cache = shaper_cache;
	}
	else
	{
		shdev->hard_header_cache = NULL;
	}

	if (dev->header_cache_update)
	{
		sh->header_cache_update	= dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	}
	else
		shdev->header_cache_update = NULL;
#else
	/* Header caching is disabled entirely (see #if 0 hooks above). */
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	/* Mirror the attached device's link-layer identity. */
	shdev->hard_header_len = dev->hard_header_len;
	shdev->type = dev->type;
	shdev->addr_len = dev->addr_len;
	shdev->mtu = dev->mtu;
	sh->bitspersec = 0;
	return 0;
}
 | 475 |  | 
 | 476 | static int shaper_ioctl(struct net_device *dev,  struct ifreq *ifr, int cmd) | 
 | 477 | { | 
 | 478 | 	struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru; | 
 | 479 | 	struct shaper *sh=dev->priv; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 480 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 481 | 	if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED) | 
 | 482 | 	{ | 
 | 483 | 		if(!capable(CAP_NET_ADMIN)) | 
 | 484 | 			return -EPERM; | 
 | 485 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 486 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 487 | 	switch(ss->ss_cmd) | 
 | 488 | 	{ | 
 | 489 | 		case SHAPER_SET_DEV: | 
 | 490 | 		{ | 
 | 491 | 			struct net_device *them=__dev_get_by_name(ss->ss_name); | 
 | 492 | 			if(them==NULL) | 
 | 493 | 				return -ENODEV; | 
 | 494 | 			if(sh->dev) | 
 | 495 | 				return -EBUSY; | 
 | 496 | 			return shaper_attach(dev,dev->priv, them); | 
 | 497 | 		} | 
 | 498 | 		case SHAPER_GET_DEV: | 
 | 499 | 			if(sh->dev==NULL) | 
 | 500 | 				return -ENODEV; | 
 | 501 | 			strcpy(ss->ss_name, sh->dev->name); | 
 | 502 | 			return 0; | 
 | 503 | 		case SHAPER_SET_SPEED: | 
 | 504 | 			shaper_setspeed(sh,ss->ss_speed); | 
 | 505 | 			return 0; | 
 | 506 | 		case SHAPER_GET_SPEED: | 
 | 507 | 			ss->ss_speed=sh->bitspersec; | 
 | 508 | 			return 0; | 
 | 509 | 		default: | 
 | 510 | 			return -EINVAL; | 
 | 511 | 	} | 
 | 512 | } | 
 | 513 |  | 
 | 514 | static void shaper_init_priv(struct net_device *dev) | 
 | 515 | { | 
 | 516 | 	struct shaper *sh = dev->priv; | 
 | 517 |  | 
 | 518 | 	skb_queue_head_init(&sh->sendq); | 
 | 519 | 	init_timer(&sh->timer); | 
 | 520 | 	sh->timer.function=shaper_timer; | 
 | 521 | 	sh->timer.data=(unsigned long)sh; | 
| Christoph Hellwig | bc971de | 2005-07-05 15:03:46 -0700 | [diff] [blame] | 522 | 	spin_lock_init(&sh->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 523 | } | 
 | 524 |  | 
 | 525 | /* | 
 | 526 |  *	Add a shaper device to the system | 
 | 527 |  */ | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 528 |  | 
/*
 * netdev setup callback for alloc_netdev(): fill in the shaper device's
 * operations and default link parameters. The header/rebuild/neigh ops
 * installed here are replaced or confirmed by shaper_attach() when a
 * real device is bound.
 */
static void __init shaper_setup(struct net_device *dev)
{
	/*
	 *	Set up the shaper.
	 */

	SET_MODULE_OWNER(dev);

	shaper_init_priv(dev);

	dev->open		= shaper_open;
	dev->stop		= shaper_close;
	dev->hard_start_xmit 	= shaper_start_xmit;
	dev->get_stats 		= shaper_get_stats;
	dev->set_multicast_list = NULL;

	/*
	 *	Initialise the packet queues
	 */

	/*
	 *	Handlers for when we attach to a device.
	 */

	dev->hard_header 	= shaper_header;
	dev->rebuild_header 	= shaper_rebuild_header;
#if 0
	dev->hard_header_cache	= shaper_cache;
	dev->header_cache_update= shaper_cache_update;
#endif
	dev->neigh_setup	= shaper_neigh_setup_dev;
	dev->do_ioctl		= shaper_ioctl;
	dev->hard_header_len	= 0;
	dev->type		= ARPHRD_ETHER;	/* initially */
	dev->set_mac_address	= NULL;
	dev->mtu		= 1500;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;
	dev->flags		= 0;
}
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 569 |  | 
/* Number of shaper devices to create at load time (default 1). */
static int shapers = 1;
#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

/* Built-in variant: parse "shapers=N" from the kernel command line. */
static int __init set_num_shapers(char *str)
{
	shapers = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */

/* Table of registered devices, so shaper_exit() can tear them down. */
static struct net_device **devs;

/* Count of successfully registered entries in devs[]. */
static unsigned int shapers_registered = 0;
 | 591 |  | 
 | 592 | static int __init shaper_init(void) | 
 | 593 | { | 
 | 594 | 	int i; | 
 | 595 | 	size_t alloc_size; | 
 | 596 | 	struct net_device *dev; | 
 | 597 | 	char name[IFNAMSIZ]; | 
 | 598 |  | 
 | 599 | 	if (shapers < 1) | 
 | 600 | 		return -ENODEV; | 
 | 601 |  | 
 | 602 | 	alloc_size = sizeof(*dev) * shapers; | 
 | 603 | 	devs = kmalloc(alloc_size, GFP_KERNEL); | 
 | 604 | 	if (!devs) | 
 | 605 | 		return -ENOMEM; | 
 | 606 | 	memset(devs, 0, alloc_size); | 
 | 607 |  | 
 | 608 | 	for (i = 0; i < shapers; i++) { | 
 | 609 |  | 
 | 610 | 		snprintf(name, IFNAMSIZ, "shaper%d", i); | 
 | 611 | 		dev = alloc_netdev(sizeof(struct shaper), name, | 
 | 612 | 				   shaper_setup); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 613 | 		if (!dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 614 | 			break; | 
 | 615 |  | 
 | 616 | 		if (register_netdev(dev)) { | 
 | 617 | 			free_netdev(dev); | 
 | 618 | 			break; | 
 | 619 | 		} | 
 | 620 |  | 
 | 621 | 		devs[i] = dev; | 
 | 622 | 		shapers_registered++; | 
 | 623 | 	} | 
 | 624 |  | 
 | 625 | 	if (!shapers_registered) { | 
 | 626 | 		kfree(devs); | 
 | 627 | 		devs = NULL; | 
 | 628 | 	} | 
 | 629 |  | 
 | 630 | 	return (shapers_registered ? 0 : -ENODEV); | 
 | 631 | } | 
 | 632 |  | 
 | 633 | static void __exit shaper_exit (void) | 
 | 634 | { | 
 | 635 | 	int i; | 
 | 636 |  | 
 | 637 | 	for (i = 0; i < shapers_registered; i++) { | 
 | 638 | 		if (devs[i]) { | 
 | 639 | 			unregister_netdev(devs[i]); | 
 | 640 | 			free_netdev(devs[i]); | 
 | 641 | 		} | 
 | 642 | 	} | 
 | 643 |  | 
 | 644 | 	kfree(devs); | 
 | 645 | 	devs = NULL; | 
 | 646 | } | 
 | 647 |  | 
/* Module entry/exit points and license declaration. */
module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");
 | 651 |  |