/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0   /* keep 'em coming, baby */
#define NET_RX_DROP		1   /* packet dropped */
#define NET_RX_CN_LOW		2   /* storm alert, just in case */
#define NET_RX_CN_MOD		3   /* Storm on its way! */
#define NET_RX_CN_HIGH		4   /* The storm is here */
#define NET_RX_BAD		5   /* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or is already
 * dropping some packets of the same priority, prompting us to send less
 * aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
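
/*
 * Illustrative sketch (not part of this file): a caller propagating a
 * transmit result can use these macros so congestion notification is
 * not reported upward as a hard failure:
 *
 *	int err = dev_queue_xmit(skb);
 *	return net_xmit_eval(err);	(NET_XMIT_CN maps to 0/success;
 *					 all other codes pass through)
 */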

#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK 0		/* driver took care of packet */
#define NETDEV_TX_BUSY 1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED -1	/* driver tx lock was already taken */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long   state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


/* Media selection options. */
enum {
        IF_PORT_UNKNOWN = 0,
        IF_PORT_10BASE2,
        IF_PORT_10BASET,
        IF_PORT_AUI,
        IF_PORT_100BASET,
        IF_PORT_100BASETX,
        IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			     */
	atomic_t	hh_refcnt;	/* number of users                   */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
                                         *  NOTE:  For VLANs, this will be the
                                         *  encapsulated type. --BLG
                                         */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

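/*
 * Worked example (illustrative, assuming plain Ethernet): with
 * dev->hard_header_len == 14 and HH_DATA_MOD == 16,
 *
 *	HH_DATA_ALIGN(14)	== (14 + 15) & ~15		== 16
 *	HH_DATA_OFF(14)		== 16 - (((14 - 1) & 15) + 1)	== 2
 *	LL_RESERVED_SPACE(dev)	== (14 & ~15) + 16		== 16
 *
 * so callers such as arp_create() typically reserve headroom with
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 * before filling in the link-layer header.
 */
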
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF=0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used during device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

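/*
 * Illustrative sketch (not part of this file): a typical driver interrupt
 * handler defers receive work to NAPI.  my_priv and my_irq_disable() are
 * hypothetical driver-side names; only napi_schedule() comes from here.
 *
 *	static irqreturn_t my_interrupt(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_irq_disable(priv);		(mask rx interrupts first)
 *		napi_schedule(&priv->napi);	(poll routine runs later)
 *		return IRQ_HANDLED;
 *	}
 */
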
/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}

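/*
 * Illustrative sketch (not part of this file): the matching poll routine.
 * my_priv, my_clean_rx() and my_irq_enable() are hypothetical helpers;
 * the contract shown (complete only when under budget) is the NAPI one.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work_done = my_clean_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			my_irq_enable(priv);	(re-enable rx interrupts)
 *		}
 *		return work_done;
 *	}
 */
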
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. E.g. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	struct net_device	*next_sched;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	int			promiscuity;
	int			allmulti;


	/* Protocol specific pointers */

	void 			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr	*/

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of the processor that entered hard_start_xmit,
	   or -1 if nobody is in there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group  *sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

	/* The TX queue control structures */
	unsigned int			egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

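/*
 * Illustrative sketch (not part of this file): code that must build the
 * same with and without CONFIG_NET_NS compares namespaces through the
 * accessor instead of touching nd_net directly, e.g.
 *
 *	if (dev_net(dev) != &init_net)
 *		return -EOPNOTSUPP;
 */
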
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}

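/*
 * Illustrative sketch (not part of this file): probe-time wiring of a NAPI
 * context.  alloc_etherdev() comes from <linux/etherdevice.h>; my_priv,
 * my_poll and the weight of 64 are hypothetical driver choices.
 *
 *	struct net_device *netdev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv = netdev_priv(netdev);
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, 64);
 *	SET_NETDEV_DEV(netdev, &pdev->dev);
 *	err = register_netdevice(netdev);	(or register_netdev(), which
 *						 takes the rtnl lock itself)
 */
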
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */


#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

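/*
 * Illustrative sketch (not part of this file): walking the device list of
 * a namespace.  Readers hold dev_base_lock; writers additionally hold the
 * RTNL.
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, d)
 *		printk(KERN_DEBUG "found %s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */
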
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device	*dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device	*dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device	*__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

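/*
 * Illustrative sketch (not part of this file): how an output path fills in
 * the link-layer header once the payload is in place, in the style of
 * neigh_connected_output().  daddr would come from the neighbour entry.
 *
 *	__skb_pull(skb, skb_network_offset(skb));
 *	if (dev_hard_header(skb, dev, ntohs(skb->protocol),
 *			    daddr, NULL, skb->len) < 0)
 *		goto drop;
 */
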
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 | 977 |  *	netif_stop_queue - stop transmitted packets | 
 | 978 |  *	@dev: network device | 
 | 979 |  * | 
 | 980 |  *	Stop upper layers calling the device hard_start_xmit routine. | 
 | 981 |  *	Used for flow control when transmit resources are unavailable. | 
 | 982 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | static inline void netif_stop_queue(struct net_device *dev) | 
 | 984 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | 	set_bit(__LINK_STATE_XOFF, &dev->state); | 
 | 986 | } | 
 | 987 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 988 | /** | 
 | 989 |  *	netif_queue_stopped - test if transmit queue is flowblocked | 
 | 990 |  *	@dev: network device | 
 | 991 |  * | 
 | 992 |  *	Test if transmit queue on device is currently unable to send. | 
 | 993 |  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 994 | static inline int netif_queue_stopped(const struct net_device *dev) | 
 | 995 | { | 
 | 996 | 	return test_bit(__LINK_STATE_XOFF, &dev->state); | 
 | 997 | } | 
 | 998 |  | 
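/* A minimal flow-control sketch, assuming a hypothetical driver with a
 * "struct my_priv" and ring helpers of its own: hard_start_xmit stops
 * the queue when the TX ring fills, and the TX-completion interrupt
 * wakes it once descriptors are reclaimed.
 *
 *	static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		my_queue_to_ring(priv, skb);
 *		if (my_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static void my_tx_complete_irq(struct my_priv *priv)
 *	{
 *		my_reclaim_descriptors(priv);
 *		if (netif_queue_stopped(priv->dev) && !my_tx_ring_full(priv))
 *			netif_wake_queue(priv->dev);
 *	}
 */
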
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check whether a subqueue is stopped.  All other device
 * management is done at the overall netdevice level.
 * There is also a test for whether the device is multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	__netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if the device has multiple transmit queues.
 * Always false if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return !!(NETIF_F_MULTI_QUEUE & dev->features);
#else
	return 0;
#endif
}
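
/* Hedged sketch for a multiqueue-aware driver (the priv and ring helpers
 * are hypothetical): the per-queue transmit path stops only the subqueue
 * whose ring filled, and the matching completion path wakes it again.
 *
 *	u16 queue = skb_get_queue_mapping(skb);
 *
 *	my_queue_to_ring(priv, queue, skb);
 *	if (my_ring_full(priv, queue))
 *		netif_stop_subqueue(dev, queue);
 *	...
 *	if (__netif_subqueue_stopped(dev, queue) && !my_ring_full(priv, queue))
 *		netif_wake_subqueue(dev, queue);
 */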

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
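
/* Illustrative sketch: a non-NAPI driver hands a received frame to the
 * stack from its interrupt handler with netif_rx(); a NAPI poll routine
 * would call netif_receive_skb() directly instead.  "my_get_rx_skb" is a
 * hypothetical helper that builds the skb from the hardware ring.
 *
 *	struct sk_buff *skb = my_get_rx_skb(priv);
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);		// called from hard IRQ context
 */
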
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
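
/* Reference-counting sketch (a hypothetical deferred-work example): code
 * that stashes a net_device pointer beyond the scope where the device is
 * known to be alive must hold a reference for as long as the pointer is
 * kept, and drop it when done.
 *
 *	dev_hold(dev);
 *	my_work->dev = dev;
 *	schedule_work(&my_work->work);
 *	...
 *	// in the work handler, once finished with the device:
 *	dev_put(my_work->dev);
 */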

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name "carrier" is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);
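
/* Link-state sketch, assuming a hypothetical PHY/link status interrupt:
 * the driver reports lower-layer state transitions to the stack, which
 * in turn drives the watchdog and linkwatch events.
 *
 *	static void my_link_change(struct net_device *dev)
 *	{
 *		if (my_phy_link_up(dev) && !netif_carrier_ok(dev))
 *			netif_carrier_on(dev);
 *		else if (!my_phy_link_up(dev) && netif_carrier_ok(dev))
 *			netif_carrier_off(dev);
 *	}
 */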

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - mark device as not dormant.
 *	@dev: network device
 *
 * Mark the device as no longer dormant.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
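
/* Dormancy sketch: a hypothetical 802.1X-style driver might mark the
 * interface dormant while it waits for authorization, then clear the
 * state once traffic may pass (the RFC2863 oper state then moves to UP).
 *
 *	netif_dormant_on(dev);		// waiting for external event
 *	...
 *	netif_dormant_off(dev);		// authorized, ready to pass packets
 */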

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if the device's RFC2863 operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check that the device has not been removed from the system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
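
/* Usage sketch: drivers typically seed msg_enable from a module
 * parameter via netif_msg_init() and gate their printk()s on the
 * netif_msg_*() tests.  "debug" and the priv layout are hypothetical.
 *
 *	static int debug = -1;		// -1 selects the driver defaults
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */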

/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().  */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on the current cpu. This primitive is called by dev->poll() when
 * it completes its work. The device must not be off the poll list at
 * this moment; if it is, that is a BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}
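
/* A minimal sketch of the NAPI flow through these compat wrappers
 * (driver function and field names are hypothetical): the device
 * interrupt disables RX interrupts and schedules polling; the poll
 * routine processes up to a budget of packets and completes when the
 * ring drains.
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		if (netif_rx_schedule_prep(priv->dev, &priv->napi)) {
 *			my_disable_rx_irq(priv);
 *			__netif_rx_schedule(priv->dev, &priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_rx_work(priv, budget);
 *
 *		if (done < budget) {
 *			netif_rx_complete(priv->dev, napi);
 *			my_enable_rx_irq(priv);
 *		}
 *		return done;
 *	}
 */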

/**
 *	__netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *	@cpu: cpu number of lock owner
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(dev, smp_processor_id());
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(dev, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}
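
/* Locking sketch: a driver that must quiesce the transmit path while
 * reconfiguring hardware can take the xmit lock from process context
 * with the BH-disabling variant; netif_tx_disable() above is essentially
 * this pattern plus stopping the queue.  The ring update is hypothetical.
 *
 *	netif_tx_lock_bh(dev);
 *	my_reprogram_tx_ring(dev);
 *	netif_tx_unlock_bh(dev);
 */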

/* These functions live elsewhere (drivers/net/net_init.c), but are related */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
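
/* Registration sketch for a loadable driver, assuming a hypothetical
 * "struct my_priv" and setup callback: allocate, register, and on
 * teardown unregister and free.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */
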
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
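
/* Decision sketch: the core transmit path uses netif_needs_gso() to see
 * whether a GSO skb must be segmented in software before being handed to
 * a device that cannot handle it itself.  A simplified view of that
 * logic (the drop label is illustrative):
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		// transmit each skb on the segs list in turn
 *	}
 */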

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif	/* _LINUX_NETDEVICE_H */