/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
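/*
 * Usage sketch (not part of this header): a caller that is happy to treat
 * NET_XMIT_CN as success can map the dev_queue_xmit() return code with
 * net_xmit_eval().  The function name my_xmit_one() is made up for the
 * example.
 *
 *	static int my_xmit_one(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		return net_xmit_eval(rc);	// NET_XMIT_CN -> 0, others unchanged
 *	}
 */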

#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK 0		/* driver took care of packet */
#define NETDEV_TX_BUSY 1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED -1	/* driver tx lock was already taken */

#ifdef __KERNEL__

/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif /* __KERNEL__ */

/*
 * Network device statistics. Akin to the 2.0 ether stats but
 * with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
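
/*
 * Usage sketch (hypothetical driver code, not defined in this header):
 * a driver typically bumps these counters from its RX/TX handlers and
 * returns &dev->stats (or a private copy) from its get_stats hook.
 *
 *	static void my_rx_one(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		dev->stats.rx_packets++;
 *		dev->stats.rx_bytes += skb->len;
 *		netif_receive_skb(skb);
 *	}
 */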


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned	total;
	unsigned	dropped;
	unsigned	time_squeeze;
	unsigned	cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache	*hh_next;	/* Next entry		*/
	atomic_t	hh_refcnt;	/* number of users	*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 *  NOTE:  For VLANs, this will be the
					 *  encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.        */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
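
/*
 * Allocation sketch (hypothetical caller, not defined here): code that
 * builds an outgoing packet by hand typically sizes the skb with
 * LL_ALLOCATED_SPACE() and reserves LL_RESERVED_SPACE() so the link
 * layer header fits in front of the payload.
 *
 *	struct sk_buff *skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len,
 *					GFP_ATOMIC);
 *	if (skb) {
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		// ...fill in the payload, then dev_hard_header()...
 *	}
 */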

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
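
/*
 * NAPI usage sketch (hypothetical driver; names like my_priv, my_isr and
 * my_poll are made up): the interrupt handler masks RX interrupts and
 * schedules the context; the poll routine processes up to @budget packets
 * and calls napi_complete() only when it did less work than allowed.
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);		// assumed hardware helper
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = my_rx_clean(priv, budget);	// assumed, returns packets handled
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			my_enable_rx_irq(priv);		// assumed hardware helper
 *		}
 *		return work;
 *	}
 */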

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

enum netdev_queue_state_t
{
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	spinlock_t		_xmit_lock;
	int			xmit_lock_owner;
	struct Qdisc		*qdisc_sleeping;
} ____cacheline_aligned_in_smp;

/*
 * The DEVICE structure.
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* Extra head- and tailroom the hardware may need; this cannot always
	 * be guaranteed, especially for the tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	u16			(*select_queue)(struct net_device *dev,
						struct sk_buff *skb);

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ((sizeof(struct net_device)
					+ NETDEV_ALIGN_CONST)
				& ~NETDEV_ALIGN_CONST);
}
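
/*
 * Private-data sketch: alloc_netdev()/alloc_etherdev() (declared in
 * etherdevice.h, not in this header) size the allocation so that
 * netdev_priv() returns the driver-private area.  struct my_priv is a
 * hypothetical example type.
 *
 *	struct my_priv {
 *		struct napi_struct napi;
 *		// ...driver state...
 *	};
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	struct my_priv *priv = netdev_priv(dev);
 */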

/* Set the sysfs physical device reference for the network logical device;
 * if set prior to registration, a symlink is created during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
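
/*
 * Setup sketch (hypothetical driver): register the poll routine at probe
 * time, then enable/disable the context from the device open/stop paths.
 * my_poll and priv are the made-up names from the sketch above.
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);	// 64 = typical weight
 *	// ...
 *	napi_enable(&priv->napi);	// in the device open routine
 *	napi_disable(&priv->napi);	// in the device stop routine
 */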

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
	list_del(&napi->dev_list);
#endif
}

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */


#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
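
/*
 * Iteration sketch: walking the device list requires holding the RTNL or
 * dev_base_lock for the duration of the loop.  init_net is the default
 * namespace from net_namespace.h.
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(&init_net, d)
 *		printk(KERN_DEBUG "%s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */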

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

extern int			netdev_boot_setup_check(struct net_device *dev);
extern unsigned long		netdev_boot_base(const char *prefix, int unit);
extern struct net_device	*dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device	*dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device	*__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
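
/*
 * Header-construction sketch (hypothetical caller): after reserving
 * LL_RESERVED_SPACE() headroom, a sender can have the device build its
 * link layer header.  ETH_P_IP comes from if_ether.h; dest is an assumed
 * destination hardware address and "drop" a hypothetical error label.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dest, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto drop;
 */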

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct Qdisc		*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers from calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
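
/*
 * Flow-control sketch (hypothetical single-queue driver): the xmit path
 * stops the queue when the hardware ring is full, and the TX-completion
 * handler wakes it again once descriptors are reclaimed.  my_ring_full()
 * and my_ring_has_room() are assumed driver helpers.
 *
 *	static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		// ...queue skb to the hardware ring...
 *		if (my_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// in the TX-completion handler:
 *	if (netif_queue_stopped(dev) && my_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */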

static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check of whether a subqueue is stopped; all other device
 * management is done at the overall netdevice level.
 * There is also a test for whether the device is multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}
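
/*
 * Multiqueue sketch (hypothetical driver): per-queue flow control uses the
 * skb's queue mapping to pick the subqueue to stop or wake.  my_ring_full()
 * is an assumed driver helper; skb_get_queue_mapping() comes from skbuff.h.
 *
 *	u16 q = skb_get_queue_mapping(skb);
 *
 *	if (my_ring_full(priv, q))
 *		netif_stop_subqueue(dev, q);
 *	// ...later, when descriptors for ring q are reclaimed:
 *	netif_wake_subqueue(dev, q);
 */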
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204
1205/* Use this variant when it is known for sure that it
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07001206 * is executing from hardware interrupt context or with hardware interrupts
1207 * disabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001209extern void dev_kfree_skb_irq(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
1211/* Use this variant in places where it could be invoked
Matti Linnanvuori0ef47302008-03-28 16:33:00 -07001212 * from either hardware interrupt or other context, with hardware interrupts
1213 * either disabled or enabled.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001215extern void dev_kfree_skb_any(struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
1217#define HAVE_NETIF_RX 1
1218extern int netif_rx(struct sk_buff *skb);
1219extern int netif_rx_ni(struct sk_buff *skb);
1220#define HAVE_NETIF_RECEIVE_SKB 1
1221extern int netif_receive_skb(struct sk_buff *skb);
Patrick McHardybc1d0412008-07-14 22:49:30 -07001222extern void netif_nit_deliver(struct sk_buff *skb);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08001223extern int dev_valid_name(const char *name);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001224extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
1225extern int dev_ethtool(struct net *net, struct ifreq *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226extern unsigned dev_get_flags(const struct net_device *);
1227extern int dev_change_flags(struct net_device *, unsigned);
1228extern int dev_change_name(struct net_device *, char *);
Eric W. Biedermance286d32007-09-12 13:53:49 +02001229extern int dev_change_net_namespace(struct net_device *,
1230 struct net *, const char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231extern int dev_set_mtu(struct net_device *, int);
1232extern int dev_set_mac_address(struct net_device *,
1233 struct sockaddr *);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001234extern int dev_hard_start_xmit(struct sk_buff *skb,
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001235 struct net_device *dev,
1236 struct netdev_queue *txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001238extern int netdev_budget;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239
1240/* Called by rtnetlink.c:rtnl_unlock() */
1241extern void netdev_run_todo(void);
1242
/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

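/*
 * Usage sketch (illustrative only): dev_get_by_name() returns a device
 * with its reference count already raised, so the caller pairs it with
 * dev_put() once the device is no longer needed.  init_net and "eth0"
 * are example values.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */
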
/* Carrier loss detection, dial on demand.  The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serializing these calls.
 *
 * The name "carrier" is something of a misnomer; these functions should
 * really be called netif_lowerlayer_*() because they represent the state
 * of any kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on the device.
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

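/*
 * Usage sketch (illustrative only): a driver's link-change handler, e.g.
 * run from a PHY interrupt or a polling timer, typically reports link
 * transitions with netif_carrier_on()/netif_carrier_off().  foo_link_up()
 * is a hypothetical helper.
 *
 *	static void foo_check_link(struct net_device *dev)
 *	{
 *		if (foo_link_up(dev)) {
 *			if (!netif_carrier_ok(dev))
 *				netif_carrier_on(dev);
 *		} else {
 *			if (netif_carrier_ok(dev))
 *				netif_carrier_off(dev);
 *		}
 *	}
 */
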
/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - mark device as no longer dormant.
 * @dev: network device
 *
 * Clear the device's dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if the device is in the RFC2863 dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

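/*
 * Usage sketch (illustrative only): an "on-demand" style driver waiting
 * for an external event, e.g. 802.1X authentication, could mark the
 * interface dormant until the event arrives.  foo_authenticated() is a
 * hypothetical predicate.
 *
 *	if (!foo_authenticated(dev))
 *		netif_dormant_on(dev);
 *	else
 *		netif_dormant_off(dev);
 */
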
/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if the device's RFC2863 operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

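/*
 * Usage sketch (illustrative only): PCI drivers commonly call
 * netif_device_detach() in their suspend handler and netif_device_attach()
 * on resume, so the core stops treating the device as present while the
 * hardware is powered down.  The foo_* names are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */
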
/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

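/*
 * Usage sketch (illustrative only): a driver usually seeds msg_enable in
 * its probe routine from a module parameter and then gates its printk()s
 * on the netif_msg_*() helpers.  "debug" and foo_priv are assumptions
 * made for this example.
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *	...
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link is up\n", dev->name);
 */
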
/* Test if receive needs to be scheduled, but only if the device is up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list.  This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll.  Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll.  Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* Same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued.
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on the current cpu.  This primitive is called by dev->poll() when
 * it completes its work.  The device cannot be out of the poll list at
 * this moment; if it is, that is a BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}

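/*
 * Usage sketch (illustrative only): the classic NAPI pattern built on the
 * helpers above.  The interrupt handler masks further RX interrupts and
 * schedules the poll; the poll routine calls netif_rx_complete() and
 * re-enables interrupts once it has done less work than its budget.  The
 * foo_* names and priv fields are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		if (netif_rx_schedule_prep(dev, &priv->napi)) {
 *			foo_disable_rx_irq(priv);
 *			__netif_rx_schedule(dev, &priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work_done = foo_rx(priv, budget);
 *
 *		if (work_done < budget) {
 *			netif_rx_complete(priv->dev, napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work_done;
 *	}
 */
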
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get the network device transmit lock, freezing all transmit queues.
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and have already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
			__netif_schedule(txq->qdisc);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}

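/*
 * Usage sketch (illustrative only): netif_tx_lock_bh()/netif_tx_unlock_bh()
 * can fence out ->hard_start_xmit() while a driver reconfigures its TX
 * rings, e.g. on an MTU or ring-size change.  foo_reinit_tx_rings() is a
 * hypothetical helper.
 *
 *	netif_tx_lock_bh(dev);
 *	foo_reinit_tx_rings(priv);
 *	netif_tx_unlock_bh(dev);
 */
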
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

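/*
 * Usage sketch (illustrative only): code walking the device address lists
 * outside of the rx-mode callbacks (which are invoked with the lock
 * already held) would bracket the walk with netif_addr_lock_bh() and
 * netif_addr_unlock_bh().  foo_program_mc_filter() is a hypothetical
 * helper.
 *
 *	struct dev_addr_list *mc;
 *
 *	netif_addr_lock_bh(dev);
 *	for (mc = dev->mc_list; mc; mc = mc->next)
 *		foo_program_mc_filter(priv, mc->da_addr);
 *	netif_addr_unlock_bh(dev);
 */
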
/* These functions live elsewhere (in drivers/net/net_init.c), but are related */

extern void ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
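/*
 * Usage sketch (illustrative only): a typical single-queue Ethernet driver
 * allocates its device with alloc_etherdev() (an ether_setup()-based
 * wrapper around alloc_netdev()) and then registers it.  struct foo_priv
 * and the foo_* callbacks are hypothetical.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->open = foo_open;
 *	dev->stop = foo_stop;
 *	dev->hard_start_xmit = foo_start_xmit;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */
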
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern void netdev_bonding_change(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern char *netdev_drivername(struct net_device *dev, char *buffer, int len);

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

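/*
 * Usage sketch (illustrative only): a transmit path that cannot hand a
 * GSO skb to the hardware checks netif_needs_gso() and, if it returns
 * true, falls back to software segmentation with skb_gso_segment().  The
 * "drop" label stands in for the caller's error handling.
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *		... transmit each segment in the segs list ...
 *	}
 */
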
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */