/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21  15:17:18  alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12  1995/03/22  21:07:51  anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11  1995/01/19  23:14:31  guru
 * 		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19  23:07:53  guru
 * back to
 * 		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19  22:38:20  guru
 * 		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19  22:30:55  guru
 *       slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 * 			(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19  21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19  21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18  22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18  21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17  22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15  16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15  14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15  14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15  02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15  02:36:31  guru
 * big changes
 *
 * 	scheduler was torn out and replaced with something smarter
 *
 * 	global names not prefixed with eql_ were renamed to protect
 * 	against namespace collisions
 *
 * 	a few more abstract interfaces were added to facilitate any
 * 	potential change of datastructure.  the driver is still using
 * 	a linked list of slaves.  going to a heap would be a bit of
 * 	an overkill.
 *
 * 	this compiles fine with no warnings.
 *
 * 	the locking mechanism and timer stuff must be written however,
 * 	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>

#include <asm/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

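/*
 * Periodic housekeeping: every EQL_DEFAULT_RESCHED_IVAL jiffies the timer
 * walks the slave list, drains priority_Bps from each live slave's
 * bytes_queued counter (clamped at zero), removes slaves whose devices
 * are no longer up, and then rearms itself.
 */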
static void eql_timer(unsigned long param)
{
	equalizer_t *eql = (equalizer_t *) param;
	struct list_head *this, *tmp, *head;

	spin_lock_bh(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static char version[] __initdata =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	init_timer(&eql->timer);
	eql->timer.data     	= (unsigned long) eql;
	eql->timer.expires  	= jiffies + EQL_DEFAULT_RESCHED_IVAL;
	eql->timer.function 	= eql_timer;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->netdev_ops		= &eql_netdev_ops;

	/*
	 *	Now we undo some of the things that eth_setup does
	 * 	that we don't like
	 */

	dev->mtu        	= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags      	= IFF_MASTER;

	dev->type       	= ARPHRD_SLIP;
	dev->tx_queue_len 	= 5;		/* Hands them off fast */
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
	       "your slave devices.\n", dev->name);

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 *	The timer has to be stopped first before we start hacking away
	 *	at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

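/*
 * The master device is configured through the EQL_* private ioctls
 * dispatched below.  Apart from the two "get configuration" requests,
 * every command changes driver state and therefore requires
 * CAP_NET_ADMIN.
 */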
static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
		case EQL_ENSLAVE:
			return eql_enslave(dev, ifr->ifr_data);
		case EQL_EMANCIPATE:
			return eql_emancipate(dev, ifr->ifr_data);
		case EQL_GETSLAVECFG:
			return eql_g_slave_cfg(dev, ifr->ifr_data);
		case EQL_SETSLAVECFG:
			return eql_s_slave_cfg(dev, ifr->ifr_data);
		case EQL_GETMASTRCFG:
			return eql_g_master_cfg(dev, ifr->ifr_data);
		case EQL_SETMASTRCFG:
			return eql_s_master_cfg(dev, ifr->ifr_data);
		default:
			return -EOPNOTSUPP;
	}
}

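/*
 * Scheduling: each live slave is given a load value of
 *
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 8;
 *
 * i.e. a fixed bias minus the configured bandwidth (bytes per second)
 * plus a penalty for the data already queued on that slave.  The slave
 * with the smallest load gets the next packet; slaves whose devices
 * have gone down are culled during the scan.
 */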
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}

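/*
 * Transmit path of the master device: under the queue lock, pick the
 * least-loaded live slave, charge the packet length to its backlog and
 * forward the skb with dev_queue_xmit().  When no usable slave is left
 * the packet is dropped and accounted in tx_dropped.
 */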
static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = 1;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return 0;
}

/*
 *	Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

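/*
 * Link a new slave into the queue.  If the same device is already
 * enslaved the stale entry is removed first, so re-enslaving a device
 * simply refreshes its configuration; -ENOSPC is returned once
 * max_slaves entries are in use.
 */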
/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

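/*
 * EQL_ENSLAVE: copy a slaving_request_t from user space, look up the
 * named device in the initial namespace and attach it to a master that
 * is up.  The requested priority is in bits per second; priority_Bps
 * holds the bytes-per-second value the scheduler works with.
 */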
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	if (slave_dev) {
		if ((master_dev->flags & IFF_UP) == IFF_UP) {
			/* slave is not a master & not already a slave: */
			if (!eql_is_master(slave_dev) &&
			    !eql_is_slave(slave_dev)) {
				slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
				equalizer_t *eql = netdev_priv(master_dev);
				int ret;

				if (!s) {
					dev_put(slave_dev);
					return -ENOMEM;
				}

				memset(s, 0, sizeof(*s));
				s->dev = slave_dev;
				s->priority = srq.priority;
				s->priority_bps = srq.priority;
				s->priority_Bps = srq.priority / 8;

				spin_lock_bh(&eql->queue.lock);
				ret = __eql_insert_slave(&eql->queue, s);
				if (ret) {
					dev_put(slave_dev);
					kfree(s);
				}
				spin_unlock_bh(&eql->queue.lock);

				return ret;
			}
		}
		dev_put(slave_dev);
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, srq.slave_name);
	ret = -EINVAL;
	if (slave_dev) {
		spin_lock_bh(&eql->queue.lock);

		if (eql_is_slave(slave_dev)) {
			slave_t *slave = __eql_find_slave_dev(&eql->queue,
							      slave_dev);

			if (slave) {
				eql_kill_one_slave(&eql->queue, slave);
				ret = 0;
			}
		}
		dev_put(slave_dev);

		spin_unlock_bh(&eql->queue.lock);
	}

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	/* release the reference taken by dev_get_by_name() */
	dev_put(slave_dev);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		/* don't leak uninitialized stack bytes to user space */
		memset(&mc, 0, sizeof(master_config_t));
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	printk(version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");