/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];
	const char *name;
	void (*rx_hook)(struct netpoll *, int, char *, int);

	u32 local_ip, remote_ip;
	u16 local_port, remote_port;
	u8 local_mac[ETH_ALEN], remote_mac[ETH_ALEN];
};

struct netpoll_info {
	atomic_t refcnt;
	spinlock_t poll_lock;
	int poll_owner;
	int rx_flags;
	spinlock_t rx_lock;
	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;
	struct delayed_work tx_work;
};

void netpoll_poll(struct netpoll *np);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int netpoll_parse_options(struct netpoll *np, char *opt);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb);
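
/*
 * Typical client usage (an illustrative sketch only, modelled loosely on
 * netconsole; the client name, interface, ports, addresses and option
 * string below are made-up examples, not part of this API):
 *
 *	static struct netpoll np = {
 *		.name     = "myconsole",
 *		.dev_name = "eth0",
 *	};
 *
 *	// option string: "src-port@src-ip/dev,dst-port@dst-ip/dst-mac"
 *	if (netpoll_parse_options(&np, "6665@10.0.0.1/eth0,6666@10.0.0.2/"))
 *		return -EINVAL;
 *	if (netpoll_setup(&np))			// bind np to the device
 *		return -EINVAL;
 *	...
 *	netpoll_send_udp(&np, msg, len);	// usable from atomic context
 *	...
 *	netpoll_cleanup(&np);			// release the device binding
 */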

#ifdef CONFIG_NETPOLL
static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	unsigned long flags;
	int ret = 0;

	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}
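
/*
 * Sketch of how a receive path is expected to consult this helper
 * (illustrative; in-tree callers such as netif_rx() do the equivalent):
 *
 *	if (netpoll_rx(skb))
 *		return NET_RX_DROP;	// netpoll consumed the packet
 */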

static inline void *netpoll_poll_lock(struct net_device *dev)
{
	rcu_read_lock(); /* deal with race on ->npinfo */
	if (dev->npinfo) {
		spin_lock(&dev->npinfo->poll_lock);
		dev->npinfo->poll_owner = smp_processor_id();
		return dev->npinfo;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct netpoll_info *npi = have;

	if (npi) {
		npi->poll_owner = -1;
		spin_unlock(&npi->poll_lock);
	}
	rcu_read_unlock();
}
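
/*
 * Sketch of the intended locking pattern around a device poll call
 * (illustrative variable names; the core softirq poll loop does the
 * equivalent so that only one CPU polls a device at a time):
 *
 *	void *have = netpoll_poll_lock(dev);
 *	dev->poll(dev, &budget);
 *	netpoll_poll_unlock(have);
 */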

#else
#define netpoll_rx(a) 0
#define netpoll_poll_lock(a) NULL
#define netpoll_poll_unlock(a)
#endif

#endif