/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.105"
#define DRV_MODULE_RELDATE	"December 2, 2009"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE 128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
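/* NEXT_TX() above is an instance of the '& (foo - 1)' trick described in the
 * comment before TG3_RX_RCB_RING_SIZE(): because TG3_TX_RING_SIZE is a power
 * of two, the index wraps with a cheap mask instead of a modulo, e.g.
 * NEXT_TX(511) == 0 and NEXT_TX(0) == 1.
 */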

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
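/* With the default TG3_DEF_TX_RING_PENDING of 511, TG3_TX_WAKEUP_THRESH()
 * works out to 127, i.e. the queue is woken once roughly a quarter of the
 * configured descriptors are free again.
 */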

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
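/* The three names above identify the external firmware blobs loaded at run
 * time through the kernel firmware loader (hence the <linux/firmware.h>
 * include), presumably via request_firmware() later in the file, and they
 * are advertised with MODULE_FIRMWARE() below.
 */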

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
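/* tg3_debug is a bitmask of NETIF_MSG_* values; as an example invocation,
 * "modprobe tg3 tg3_debug=0x3" would enable only NETIF_MSG_DRV and
 * NETIF_MSG_PROBE messages, while the default of -1 selects
 * TG3_DEF_MSG_ENABLE.
 */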

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
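/* These strings are reported for ETHTOOL_GSTRINGS/GSTATS ("ethtool -S");
 * since TG3_NUM_STATS is derived from sizeof(struct tg3_ethtool_stats),
 * their order is assumed to match the u64 layout of that structure in
 * tg3.h.
 */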

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
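/* The tw32()/tr32() family above assumes a local variable named 'tp' and
 * dispatches through the tp->write32/tp->read32 function pointers, so the
 * same call site (e.g. tw32_f(MAC_MI_COM, frame_val) in tg3_readphy()
 * below) works whether the chip is accessed via direct MMIO, a flushed
 * write, or the indirect config-space window; the pointer assignments
 * themselves happen elsewhere in the driver, outside this excerpt.
 */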

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_GRC:
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
		case TG3_APE_LOCK_GRC:
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
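/* Callers are expected to pair the two routines above, e.g. (sketch):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... touch the resource shared with the APE firmware ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * On chips without TG3_FLG3_ENABLE_APE both calls are effectively no-ops.
 */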

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;
	u32 coal_now = 0;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
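/* The MII access helpers below poll MAC_MI_COM up to PHY_BUSY_LOOPS times
 * with a 10 usec delay per iteration, so a wedged PHY costs at most about
 * 50 ms before -EBUSY is returned.
 */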

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
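/* tg3_mdio_read/write/reset implement the phylib mii_bus callbacks; they are
 * presumably wired into the mdiobus allocated in tg3_mdio_init() below (the
 * assignments are outside this excerpt), which is how the attached PHY
 * driver ends up going through tg3_readphy()/tg3_writephy().
 */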
 | 943 |  | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 944 | static void tg3_mdio_config_5785(struct tg3 *tp) | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 945 | { | 
 | 946 | 	u32 val; | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 947 | 	struct phy_device *phydev; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 948 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 949 | 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 950 | 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { | 
 | 951 | 	case TG3_PHY_ID_BCM50610: | 
| Matt Carlson | c73430d | 2009-11-02 14:29:34 +0000 | [diff] [blame] | 952 | 	case TG3_PHY_ID_BCM50610M: | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 953 | 		val = MAC_PHYCFG2_50610_LED_MODES; | 
 | 954 | 		break; | 
 | 955 | 	case TG3_PHY_ID_BCMAC131: | 
 | 956 | 		val = MAC_PHYCFG2_AC131_LED_MODES; | 
 | 957 | 		break; | 
 | 958 | 	case TG3_PHY_ID_RTL8211C: | 
 | 959 | 		val = MAC_PHYCFG2_RTL8211C_LED_MODES; | 
 | 960 | 		break; | 
 | 961 | 	case TG3_PHY_ID_RTL8201E: | 
 | 962 | 		val = MAC_PHYCFG2_RTL8201E_LED_MODES; | 
 | 963 | 		break; | 
 | 964 | 	default: | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 965 | 		return; | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 966 | 	} | 
 | 967 |  | 
 | 968 | 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { | 
 | 969 | 		tw32(MAC_PHYCFG2, val); | 
 | 970 |  | 
 | 971 | 		val = tr32(MAC_PHYCFG1); | 
| Matt Carlson | bb85fbb | 2009-08-25 10:09:07 +0000 | [diff] [blame] | 972 | 		val &= ~(MAC_PHYCFG1_RGMII_INT | | 
 | 973 | 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); | 
 | 974 | 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 975 | 		tw32(MAC_PHYCFG1, val); | 
 | 976 |  | 
 | 977 | 		return; | 
 | 978 | 	} | 
 | 979 |  | 
 | 980 | 	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) | 
 | 981 | 		val |= MAC_PHYCFG2_EMODE_MASK_MASK | | 
 | 982 | 		       MAC_PHYCFG2_FMODE_MASK_MASK | | 
 | 983 | 		       MAC_PHYCFG2_GMODE_MASK_MASK | | 
 | 984 | 		       MAC_PHYCFG2_ACT_MASK_MASK   | | 
 | 985 | 		       MAC_PHYCFG2_QUAL_MASK_MASK | | 
 | 986 | 		       MAC_PHYCFG2_INBAND_ENABLE; | 
 | 987 |  | 
 | 988 | 	tw32(MAC_PHYCFG2, val); | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 989 |  | 
| Matt Carlson | bb85fbb | 2009-08-25 10:09:07 +0000 | [diff] [blame] | 990 | 	val = tr32(MAC_PHYCFG1); | 
 | 991 | 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | | 
 | 992 | 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); | 
 | 993 | 	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 994 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 
 | 995 | 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; | 
 | 996 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) | 
 | 997 | 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; | 
 | 998 | 	} | 
| Matt Carlson | bb85fbb | 2009-08-25 10:09:07 +0000 | [diff] [blame] | 999 | 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | | 
 | 1000 | 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; | 
 | 1001 | 	tw32(MAC_PHYCFG1, val); | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1002 |  | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1003 | 	val = tr32(MAC_EXT_RGMII_MODE); | 
 | 1004 | 	val &= ~(MAC_RGMII_MODE_RX_INT_B | | 
 | 1005 | 		 MAC_RGMII_MODE_RX_QUALITY | | 
 | 1006 | 		 MAC_RGMII_MODE_RX_ACTIVITY | | 
 | 1007 | 		 MAC_RGMII_MODE_RX_ENG_DET | | 
 | 1008 | 		 MAC_RGMII_MODE_TX_ENABLE | | 
 | 1009 | 		 MAC_RGMII_MODE_TX_LOWPWR | | 
 | 1010 | 		 MAC_RGMII_MODE_TX_RESET); | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 1011 | 	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) { | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1012 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 
 | 1013 | 			val |= MAC_RGMII_MODE_RX_INT_B | | 
 | 1014 | 			       MAC_RGMII_MODE_RX_QUALITY | | 
 | 1015 | 			       MAC_RGMII_MODE_RX_ACTIVITY | | 
 | 1016 | 			       MAC_RGMII_MODE_RX_ENG_DET; | 
 | 1017 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) | 
 | 1018 | 			val |= MAC_RGMII_MODE_TX_ENABLE | | 
 | 1019 | 			       MAC_RGMII_MODE_TX_LOWPWR | | 
 | 1020 | 			       MAC_RGMII_MODE_TX_RESET; | 
 | 1021 | 	} | 
 | 1022 | 	tw32(MAC_EXT_RGMII_MODE, val); | 
 | 1023 | } | 
 | 1024 |  | 
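 |  | /* Take the MDIO interface out of auto-poll mode and select the PHY | 
 |  |  * address; the 5717 derives it from the PCIe function number and | 
 |  |  * whether the port is SerDes. | 
 |  |  */ | 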
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1025 | static void tg3_mdio_start(struct tg3 *tp) | 
 | 1026 | { | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1027 | 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; | 
 | 1028 | 	tw32_f(MAC_MI_MODE, tp->mi_mode); | 
 | 1029 | 	udelay(80); | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1030 |  | 
| Matt Carlson | 882e979 | 2009-09-01 13:21:36 +0000 | [diff] [blame] | 1031 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 
 | 1032 | 		u32 funcnum, is_serdes; | 
 | 1033 |  | 
 | 1034 | 		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC; | 
 | 1035 | 		if (funcnum) | 
 | 1036 | 			tp->phy_addr = 2; | 
 | 1037 | 		else | 
 | 1038 | 			tp->phy_addr = 1; | 
 | 1039 |  | 
 | 1040 | 		is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; | 
 | 1041 | 		if (is_serdes) | 
 | 1042 | 			tp->phy_addr += 7; | 
 | 1043 | 	} else | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1044 | 		tp->phy_addr = TG3_PHY_MII_ADDR; | 
| Matt Carlson | 882e979 | 2009-09-01 13:21:36 +0000 | [diff] [blame] | 1045 |  | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1046 | 	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && | 
 | 1047 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 
 | 1048 | 		tg3_mdio_config_5785(tp); | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1049 | } | 
 | 1050 |  | 
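 |  | /* Allocate and register the MDIO bus when phylib is in use, then set | 
 |  |  * the interface mode and dev_flags to match the PHY that was found. | 
 |  |  */ | 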
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1051 | static int tg3_mdio_init(struct tg3 *tp) | 
 | 1052 | { | 
 | 1053 | 	int i; | 
 | 1054 | 	u32 reg; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1055 | 	struct phy_device *phydev; | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1056 |  | 
 | 1057 | 	tg3_mdio_start(tp); | 
 | 1058 |  | 
 | 1059 | 	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) || | 
 | 1060 | 	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)) | 
 | 1061 | 		return 0; | 
 | 1062 |  | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1063 | 	tp->mdio_bus = mdiobus_alloc(); | 
 | 1064 | 	if (tp->mdio_bus == NULL) | 
 | 1065 | 		return -ENOMEM; | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1066 |  | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1067 | 	tp->mdio_bus->name     = "tg3 mdio bus"; | 
 | 1068 | 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1069 | 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn); | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1070 | 	tp->mdio_bus->priv     = tp; | 
 | 1071 | 	tp->mdio_bus->parent   = &tp->pdev->dev; | 
 | 1072 | 	tp->mdio_bus->read     = &tg3_mdio_read; | 
 | 1073 | 	tp->mdio_bus->write    = &tg3_mdio_write; | 
 | 1074 | 	tp->mdio_bus->reset    = &tg3_mdio_reset; | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1075 | 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1076 | 	tp->mdio_bus->irq      = &tp->mdio_irq[0]; | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1077 |  | 
 | 1078 | 	for (i = 0; i < PHY_MAX_ADDR; i++) | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1079 | 		tp->mdio_bus->irq[i] = PHY_POLL; | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1080 |  | 
 | 1081 | 	/* The bus registration will look for all the PHYs on the mdio bus. | 
 | 1082 | 	 * Unfortunately, it does not ensure the PHY is powered up before | 
 | 1083 | 	 * accessing the PHY ID registers.  A chip reset is the | 
 | 1084 | 	 * quickest way to bring the device back to an operational state. | 
 | 1085 | 	 */ | 
 | 1086 | 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) | 
 | 1087 | 		tg3_bmcr_reset(tp); | 
 | 1088 |  | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1089 | 	i = mdiobus_register(tp->mdio_bus); | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1090 | 	if (i) { | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1091 | 		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n", | 
 | 1092 | 			tp->dev->name, i); | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1093 | 		mdiobus_free(tp->mdio_bus); | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1094 | 		return i; | 
 | 1095 | 	} | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1096 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1097 | 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1098 |  | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1099 | 	if (!phydev || !phydev->drv) { | 
 | 1100 | 		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); | 
 | 1101 | 		mdiobus_unregister(tp->mdio_bus); | 
 | 1102 | 		mdiobus_free(tp->mdio_bus); | 
 | 1103 | 		return -ENODEV; | 
 | 1104 | 	} | 
 | 1105 |  | 
 | 1106 | 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 1107 | 	case TG3_PHY_ID_BCM57780: | 
 | 1108 | 		phydev->interface = PHY_INTERFACE_MODE_GMII; | 
| Matt Carlson | c704dc2 | 2009-11-02 14:32:12 +0000 | [diff] [blame] | 1109 | 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 1110 | 		break; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1111 | 	case TG3_PHY_ID_BCM50610: | 
| Matt Carlson | c73430d | 2009-11-02 14:29:34 +0000 | [diff] [blame] | 1112 | 	case TG3_PHY_ID_BCM50610M: | 
| Matt Carlson | 32e5a8d | 2009-11-02 14:31:39 +0000 | [diff] [blame] | 1113 | 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | | 
| Matt Carlson | c704dc2 | 2009-11-02 14:32:12 +0000 | [diff] [blame] | 1114 | 				     PHY_BRCM_RX_REFCLK_UNUSED | | 
| Matt Carlson | 52fae08 | 2009-11-02 14:32:38 +0000 | [diff] [blame] | 1115 | 				     PHY_BRCM_DIS_TXCRXC_NOENRGY | | 
| Matt Carlson | c704dc2 | 2009-11-02 14:32:12 +0000 | [diff] [blame] | 1116 | 				     PHY_BRCM_AUTO_PWRDWN_ENABLE; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1117 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) | 
 | 1118 | 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; | 
 | 1119 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 
 | 1120 | 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; | 
 | 1121 | 		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) | 
 | 1122 | 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 1123 | 		/* fallthru */ | 
 | 1124 | 	case TG3_PHY_ID_RTL8211C: | 
 | 1125 | 		phydev->interface = PHY_INTERFACE_MODE_RGMII; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1126 | 		break; | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 1127 | 	case TG3_PHY_ID_RTL8201E: | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1128 | 	case TG3_PHY_ID_BCMAC131: | 
 | 1129 | 		phydev->interface = PHY_INTERFACE_MODE_MII; | 
| Matt Carlson | cdd4e09d | 2009-11-02 14:31:11 +0000 | [diff] [blame] | 1130 | 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 1131 | 		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1132 | 		break; | 
 | 1133 | 	} | 
 | 1134 |  | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1135 | 	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED; | 
 | 1136 |  | 
 | 1137 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 
 | 1138 | 		tg3_mdio_config_5785(tp); | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1139 |  | 
 | 1140 | 	return 0; | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1141 | } | 
 | 1142 |  | 
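 |  | /* Unregister and free the MDIO bus if tg3_mdio_init() set one up. */ | 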
 | 1143 | static void tg3_mdio_fini(struct tg3 *tp) | 
 | 1144 | { | 
 | 1145 | 	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { | 
 | 1146 | 		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1147 | 		mdiobus_unregister(tp->mdio_bus); | 
 | 1148 | 		mdiobus_free(tp->mdio_bus); | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 1149 | 	} | 
 | 1150 | } | 
 | 1151 |  | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1152 | /* tp->lock is held. */ | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 1153 | static inline void tg3_generate_fw_event(struct tg3 *tp) | 
 | 1154 | { | 
 | 1155 | 	u32 val; | 
 | 1156 |  | 
 | 1157 | 	val = tr32(GRC_RX_CPU_EVENT); | 
 | 1158 | 	val |= GRC_RX_CPU_DRIVER_EVENT; | 
 | 1159 | 	tw32_f(GRC_RX_CPU_EVENT, val); | 
 | 1160 |  | 
 | 1161 | 	tp->last_event_jiffies = jiffies; | 
 | 1162 | } | 
 | 1163 |  | 
 | 1164 | #define TG3_FW_EVENT_TIMEOUT_USEC 2500 | 
 | 1165 |  | 
 | 1166 | /* tp->lock is held. */ | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1167 | static void tg3_wait_for_event_ack(struct tg3 *tp) | 
 | 1168 | { | 
 | 1169 | 	int i; | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 1170 | 	unsigned int delay_cnt; | 
 | 1171 | 	long time_remain; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1172 |  | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 1173 | 	/* If enough time has passed, no wait is necessary. */ | 
 | 1174 | 	time_remain = (long)(tp->last_event_jiffies + 1 + | 
 | 1175 | 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - | 
 | 1176 | 		      (long)jiffies; | 
 | 1177 | 	if (time_remain < 0) | 
 | 1178 | 		return; | 
 | 1179 |  | 
 | 1180 | 	/* Check if we can shorten the wait time. */ | 
 | 1181 | 	delay_cnt = jiffies_to_usecs(time_remain); | 
 | 1182 | 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) | 
 | 1183 | 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; | 
 | 1184 | 	delay_cnt = (delay_cnt >> 3) + 1; | 
 | 1185 |  | 
 | 1186 | 	for (i = 0; i < delay_cnt; i++) { | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1187 | 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) | 
 | 1188 | 			break; | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 1189 | 		udelay(8); | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1190 | 	} | 
 | 1191 | } | 
 | 1192 |  | 
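 |  | /* Copy the current MII link state into the firmware mailbox and raise | 
 |  |  * a driver event so the ASF/management firmware sees the link change. | 
 |  |  */ | 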
 | 1193 | /* tp->lock is held. */ | 
 | 1194 | static void tg3_ump_link_report(struct tg3 *tp) | 
 | 1195 | { | 
 | 1196 | 	u32 reg; | 
 | 1197 | 	u32 val; | 
 | 1198 |  | 
 | 1199 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 
 | 1200 | 	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF)) | 
 | 1201 | 		return; | 
 | 1202 |  | 
 | 1203 | 	tg3_wait_for_event_ack(tp); | 
 | 1204 |  | 
 | 1205 | 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); | 
 | 1206 |  | 
 | 1207 | 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); | 
 | 1208 |  | 
 | 1209 | 	val = 0; | 
 | 1210 | 	if (!tg3_readphy(tp, MII_BMCR, &reg)) | 
 | 1211 | 		val = reg << 16; | 
 | 1212 | 	if (!tg3_readphy(tp, MII_BMSR, &reg)) | 
 | 1213 | 		val |= (reg & 0xffff); | 
 | 1214 | 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); | 
 | 1215 |  | 
 | 1216 | 	val = 0; | 
 | 1217 | 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) | 
 | 1218 | 		val = reg << 16; | 
 | 1219 | 	if (!tg3_readphy(tp, MII_LPA, &reg)) | 
 | 1220 | 		val |= (reg & 0xffff); | 
 | 1221 | 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); | 
 | 1222 |  | 
 | 1223 | 	val = 0; | 
 | 1224 | 	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) { | 
 | 1225 | 		if (!tg3_readphy(tp, MII_CTRL1000, &reg)) | 
 | 1226 | 			val = reg << 16; | 
 | 1227 | 		if (!tg3_readphy(tp, MII_STAT1000, &reg)) | 
 | 1228 | 			val |= (reg & 0xffff); | 
 | 1229 | 	} | 
 | 1230 | 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); | 
 | 1231 |  | 
 | 1232 | 	if (!tg3_readphy(tp, MII_PHYADDR, &reg)) | 
 | 1233 | 		val = reg << 16; | 
 | 1234 | 	else | 
 | 1235 | 		val = 0; | 
 | 1236 | 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); | 
 | 1237 |  | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 1238 | 	tg3_generate_fw_event(tp); | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1239 | } | 
 | 1240 |  | 
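 |  | /* Log the link and flow control state and forward it to the firmware. */ | 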
 | 1241 | static void tg3_link_report(struct tg3 *tp) | 
 | 1242 | { | 
 | 1243 | 	if (!netif_carrier_ok(tp->dev)) { | 
 | 1244 | 		if (netif_msg_link(tp)) | 
 | 1245 | 			printk(KERN_INFO PFX "%s: Link is down.\n", | 
 | 1246 | 			       tp->dev->name); | 
 | 1247 | 		tg3_ump_link_report(tp); | 
 | 1248 | 	} else if (netif_msg_link(tp)) { | 
 | 1249 | 		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", | 
 | 1250 | 		       tp->dev->name, | 
 | 1251 | 		       (tp->link_config.active_speed == SPEED_1000 ? | 
 | 1252 | 			1000 : | 
 | 1253 | 			(tp->link_config.active_speed == SPEED_100 ? | 
 | 1254 | 			 100 : 10)), | 
 | 1255 | 		       (tp->link_config.active_duplex == DUPLEX_FULL ? | 
 | 1256 | 			"full" : "half")); | 
 | 1257 |  | 
 | 1258 | 		printk(KERN_INFO PFX | 
 | 1259 | 		       "%s: Flow control is %s for TX and %s for RX.\n", | 
 | 1260 | 		       tp->dev->name, | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1261 | 		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1262 | 		       "on" : "off", | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1263 | 		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1264 | 		       "on" : "off"); | 
 | 1265 | 		tg3_ump_link_report(tp); | 
 | 1266 | 	} | 
 | 1267 | } | 
 | 1268 |  | 
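 |  | /* Translate FLOW_CTRL_TX/RX into copper MII pause advertisement bits. */ | 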
 | 1269 | static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) | 
 | 1270 | { | 
 | 1271 | 	u16 miireg; | 
 | 1272 |  | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1273 | 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1274 | 		miireg = ADVERTISE_PAUSE_CAP; | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1275 | 	else if (flow_ctrl & FLOW_CTRL_TX) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1276 | 		miireg = ADVERTISE_PAUSE_ASYM; | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1277 | 	else if (flow_ctrl & FLOW_CTRL_RX) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1278 | 		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | 
 | 1279 | 	else | 
 | 1280 | 		miireg = 0; | 
 | 1281 |  | 
 | 1282 | 	return miireg; | 
 | 1283 | } | 
 | 1284 |  | 
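 |  | /* Translate FLOW_CTRL_TX/RX into 1000BASE-X pause advertisement bits. */ | 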
 | 1285 | static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) | 
 | 1286 | { | 
 | 1287 | 	u16 miireg; | 
 | 1288 |  | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1289 | 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1290 | 		miireg = ADVERTISE_1000XPAUSE; | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1291 | 	else if (flow_ctrl & FLOW_CTRL_TX) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1292 | 		miireg = ADVERTISE_1000XPSE_ASYM; | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1293 | 	else if (flow_ctrl & FLOW_CTRL_RX) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1294 | 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; | 
 | 1295 | 	else | 
 | 1296 | 		miireg = 0; | 
 | 1297 |  | 
 | 1298 | 	return miireg; | 
 | 1299 | } | 
 | 1300 |  | 
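 |  | /* Resolve the negotiated flow control from the local and link partner | 
 |  |  * 1000BASE-X pause advertisements. | 
 |  |  */ | 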
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1301 | static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) | 
 | 1302 | { | 
 | 1303 | 	u8 cap = 0; | 
 | 1304 |  | 
 | 1305 | 	if (lcladv & ADVERTISE_1000XPAUSE) { | 
 | 1306 | 		if (lcladv & ADVERTISE_1000XPSE_ASYM) { | 
 | 1307 | 			if (rmtadv & LPA_1000XPAUSE) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1308 | 				cap = FLOW_CTRL_TX | FLOW_CTRL_RX; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1309 | 			else if (rmtadv & LPA_1000XPAUSE_ASYM) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1310 | 				cap = FLOW_CTRL_RX; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1311 | 		} else { | 
 | 1312 | 			if (rmtadv & LPA_1000XPAUSE) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1313 | 				cap = FLOW_CTRL_TX | FLOW_CTRL_RX; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1314 | 		} | 
 | 1315 | 	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) { | 
 | 1316 | 		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1317 | 			cap = FLOW_CTRL_TX; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1318 | 	} | 
 | 1319 |  | 
 | 1320 | 	return cap; | 
 | 1321 | } | 
 | 1322 |  | 
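 |  | /* Program the MAC RX/TX flow control enables from either the autoneg | 
 |  |  * result or the forced configuration. | 
 |  |  */ | 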
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1323 | static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1324 | { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1325 | 	u8 autoneg; | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1326 | 	u8 flowctrl = 0; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1327 | 	u32 old_rx_mode = tp->rx_mode; | 
 | 1328 | 	u32 old_tx_mode = tp->tx_mode; | 
 | 1329 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1330 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1331 | 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1332 | 	else | 
 | 1333 | 		autoneg = tp->link_config.autoneg; | 
 | 1334 |  | 
 | 1335 | 	if (autoneg == AUTONEG_ENABLE && | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1336 | 	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) { | 
 | 1337 | 		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1338 | 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1339 | 		else | 
| Steve Glendinning | bc02ff9 | 2008-12-16 02:00:48 -0800 | [diff] [blame] | 1340 | 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1341 | 	} else | 
 | 1342 | 		flowctrl = tp->link_config.flowctrl; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1343 |  | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1344 | 	tp->link_config.active_flowctrl = flowctrl; | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1345 |  | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1346 | 	if (flowctrl & FLOW_CTRL_RX) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1347 | 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; | 
 | 1348 | 	else | 
 | 1349 | 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; | 
 | 1350 |  | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1351 | 	if (old_rx_mode != tp->rx_mode) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1352 | 		tw32_f(MAC_RX_MODE, tp->rx_mode); | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1353 |  | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 1354 | 	if (flowctrl & FLOW_CTRL_TX) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1355 | 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; | 
 | 1356 | 	else | 
 | 1357 | 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; | 
 | 1358 |  | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 1359 | 	if (old_tx_mode != tp->tx_mode) | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1360 | 		tw32_f(MAC_TX_MODE, tp->tx_mode); | 
| Matt Carlson | 95e2869 | 2008-05-25 23:44:14 -0700 | [diff] [blame] | 1361 | } | 
 | 1362 |  | 
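 |  | /* phylib link-change callback: update MAC mode, flow control and TX | 
 |  |  * timings to match the PHY state, and log any change. | 
 |  |  */ | 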
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1363 | static void tg3_adjust_link(struct net_device *dev) | 
 | 1364 | { | 
 | 1365 | 	u8 oldflowctrl, linkmesg = 0; | 
 | 1366 | 	u32 mac_mode, lcl_adv, rmt_adv; | 
 | 1367 | 	struct tg3 *tp = netdev_priv(dev); | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1368 | 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1369 |  | 
| Matt Carlson | 24bb4fb | 2009-10-05 17:55:29 +0000 | [diff] [blame] | 1370 | 	spin_lock_bh(&tp->lock); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1371 |  | 
 | 1372 | 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | | 
 | 1373 | 				    MAC_MODE_HALF_DUPLEX); | 
 | 1374 |  | 
 | 1375 | 	oldflowctrl = tp->link_config.active_flowctrl; | 
 | 1376 |  | 
 | 1377 | 	if (phydev->link) { | 
 | 1378 | 		lcl_adv = 0; | 
 | 1379 | 		rmt_adv = 0; | 
 | 1380 |  | 
 | 1381 | 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) | 
 | 1382 | 			mac_mode |= MAC_MODE_PORT_MODE_MII; | 
| Matt Carlson | c3df074 | 2009-11-02 14:27:02 +0000 | [diff] [blame] | 1383 | 		else if (phydev->speed == SPEED_1000 || | 
 | 1384 | 			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1385 | 			mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
| Matt Carlson | c3df074 | 2009-11-02 14:27:02 +0000 | [diff] [blame] | 1386 | 		else | 
 | 1387 | 			mac_mode |= MAC_MODE_PORT_MODE_MII; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1388 |  | 
 | 1389 | 		if (phydev->duplex == DUPLEX_HALF) | 
 | 1390 | 			mac_mode |= MAC_MODE_HALF_DUPLEX; | 
 | 1391 | 		else { | 
 | 1392 | 			lcl_adv = tg3_advert_flowctrl_1000T( | 
 | 1393 | 				  tp->link_config.flowctrl); | 
 | 1394 |  | 
 | 1395 | 			if (phydev->pause) | 
 | 1396 | 				rmt_adv = LPA_PAUSE_CAP; | 
 | 1397 | 			if (phydev->asym_pause) | 
 | 1398 | 				rmt_adv |= LPA_PAUSE_ASYM; | 
 | 1399 | 		} | 
 | 1400 |  | 
 | 1401 | 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv); | 
 | 1402 | 	} else | 
 | 1403 | 		mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
 | 1404 |  | 
 | 1405 | 	if (mac_mode != tp->mac_mode) { | 
 | 1406 | 		tp->mac_mode = mac_mode; | 
 | 1407 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
 | 1408 | 		udelay(40); | 
 | 1409 | 	} | 
 | 1410 |  | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 1411 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { | 
 | 1412 | 		if (phydev->speed == SPEED_10) | 
 | 1413 | 			tw32(MAC_MI_STAT, | 
 | 1414 | 			     MAC_MI_STAT_10MBPS_MODE | | 
 | 1415 | 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB); | 
 | 1416 | 		else | 
 | 1417 | 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); | 
 | 1418 | 	} | 
 | 1419 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1420 | 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) | 
 | 1421 | 		tw32(MAC_TX_LENGTHS, | 
 | 1422 | 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 
 | 1423 | 		      (6 << TX_LENGTHS_IPG_SHIFT) | | 
 | 1424 | 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); | 
 | 1425 | 	else | 
 | 1426 | 		tw32(MAC_TX_LENGTHS, | 
 | 1427 | 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 
 | 1428 | 		      (6 << TX_LENGTHS_IPG_SHIFT) | | 
 | 1429 | 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); | 
 | 1430 |  | 
 | 1431 | 	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || | 
 | 1432 | 	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || | 
 | 1433 | 	    phydev->speed != tp->link_config.active_speed || | 
 | 1434 | 	    phydev->duplex != tp->link_config.active_duplex || | 
 | 1435 | 	    oldflowctrl != tp->link_config.active_flowctrl) | 
 | 1436 | 		linkmesg = 1; | 
 | 1437 |  | 
 | 1438 | 	tp->link_config.active_speed = phydev->speed; | 
 | 1439 | 	tp->link_config.active_duplex = phydev->duplex; | 
 | 1440 |  | 
| Matt Carlson | 24bb4fb | 2009-10-05 17:55:29 +0000 | [diff] [blame] | 1441 | 	spin_unlock_bh(&tp->lock); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1442 |  | 
 | 1443 | 	if (linkmesg) | 
 | 1444 | 		tg3_link_report(tp); | 
 | 1445 | } | 
 | 1446 |  | 
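 |  | /* Connect to the PHY through phylib and mask its supported features | 
 |  |  * down to what the MAC interface mode can handle. | 
 |  |  */ | 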
 | 1447 | static int tg3_phy_init(struct tg3 *tp) | 
 | 1448 | { | 
 | 1449 | 	struct phy_device *phydev; | 
 | 1450 |  | 
 | 1451 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) | 
 | 1452 | 		return 0; | 
 | 1453 |  | 
 | 1454 | 	/* Bring the PHY back to a known state. */ | 
 | 1455 | 	tg3_bmcr_reset(tp); | 
 | 1456 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1457 | 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1458 |  | 
 | 1459 | 	/* Attach the MAC to the PHY. */ | 
| Kay Sievers | fb28ad3 | 2008-11-10 13:55:14 -0800 | [diff] [blame] | 1460 | 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 1461 | 			     phydev->dev_flags, phydev->interface); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1462 | 	if (IS_ERR(phydev)) { | 
 | 1463 | 		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name); | 
 | 1464 | 		return PTR_ERR(phydev); | 
 | 1465 | 	} | 
 | 1466 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1467 | 	/* Mask with MAC supported features. */ | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1468 | 	switch (phydev->interface) { | 
 | 1469 | 	case PHY_INTERFACE_MODE_GMII: | 
 | 1470 | 	case PHY_INTERFACE_MODE_RGMII: | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 1471 | 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { | 
 | 1472 | 			phydev->supported &= (PHY_GBIT_FEATURES | | 
 | 1473 | 					      SUPPORTED_Pause | | 
 | 1474 | 					      SUPPORTED_Asym_Pause); | 
 | 1475 | 			break; | 
 | 1476 | 		} | 
 | 1477 | 		/* fallthru */ | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1478 | 	case PHY_INTERFACE_MODE_MII: | 
 | 1479 | 		phydev->supported &= (PHY_BASIC_FEATURES | | 
 | 1480 | 				      SUPPORTED_Pause | | 
 | 1481 | 				      SUPPORTED_Asym_Pause); | 
 | 1482 | 		break; | 
 | 1483 | 	default: | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1484 | 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); | 
| Matt Carlson | 9c61d6b | 2008-11-03 16:54:56 -0800 | [diff] [blame] | 1485 | 		return -EINVAL; | 
 | 1486 | 	} | 
 | 1487 |  | 
 | 1488 | 	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1489 |  | 
 | 1490 | 	phydev->advertising = phydev->supported; | 
 | 1491 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1492 | 	return 0; | 
 | 1493 | } | 
 | 1494 |  | 
 | 1495 | static void tg3_phy_start(struct tg3 *tp) | 
 | 1496 | { | 
 | 1497 | 	struct phy_device *phydev; | 
 | 1498 |  | 
 | 1499 | 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 1500 | 		return; | 
 | 1501 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1502 | 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1503 |  | 
 | 1504 | 	if (tp->link_config.phy_is_low_power) { | 
 | 1505 | 		tp->link_config.phy_is_low_power = 0; | 
 | 1506 | 		phydev->speed = tp->link_config.orig_speed; | 
 | 1507 | 		phydev->duplex = tp->link_config.orig_duplex; | 
 | 1508 | 		phydev->autoneg = tp->link_config.orig_autoneg; | 
 | 1509 | 		phydev->advertising = tp->link_config.orig_advertising; | 
 | 1510 | 	} | 
 | 1511 |  | 
 | 1512 | 	phy_start(phydev); | 
 | 1513 |  | 
 | 1514 | 	phy_start_aneg(phydev); | 
 | 1515 | } | 
 | 1516 |  | 
 | 1517 | static void tg3_phy_stop(struct tg3 *tp) | 
 | 1518 | { | 
 | 1519 | 	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 1520 | 		return; | 
 | 1521 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1522 | 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1523 | } | 
 | 1524 |  | 
 | 1525 | static void tg3_phy_fini(struct tg3 *tp) | 
 | 1526 | { | 
 | 1527 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 1528 | 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 1529 | 		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; | 
 | 1530 | 	} | 
 | 1531 | } | 
 | 1532 |  | 
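 |  | /* Write an indirect PHY DSP register: address first, then data. */ | 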
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 1533 | static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) | 
 | 1534 | { | 
 | 1535 | 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); | 
 | 1536 | 	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); | 
 | 1537 | } | 
 | 1538 |  | 
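 |  | /* Toggle auto power-down through the FET PHY's shadow register set. */ | 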
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 1539 | static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) | 
 | 1540 | { | 
 | 1541 | 	u32 phytest; | 
 | 1542 |  | 
 | 1543 | 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { | 
 | 1544 | 		u32 phy; | 
 | 1545 |  | 
 | 1546 | 		tg3_writephy(tp, MII_TG3_FET_TEST, | 
 | 1547 | 			     phytest | MII_TG3_FET_SHADOW_EN); | 
 | 1548 | 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { | 
 | 1549 | 			if (enable) | 
 | 1550 | 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; | 
 | 1551 | 			else | 
 | 1552 | 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; | 
 | 1553 | 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); | 
 | 1554 | 		} | 
 | 1555 | 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest); | 
 | 1556 | 	} | 
 | 1557 | } | 
 | 1558 |  | 
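 |  | /* Enable or disable PHY auto power-down on chips that support it. */ | 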
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 1559 | static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) | 
 | 1560 | { | 
 | 1561 | 	u32 reg; | 
 | 1562 |  | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 1563 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 1564 | 		return; | 
 | 1565 |  | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 1566 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 
 | 1567 | 		tg3_phy_fet_toggle_apd(tp, enable); | 
 | 1568 | 		return; | 
 | 1569 | 	} | 
 | 1570 |  | 
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 1571 | 	reg = MII_TG3_MISC_SHDW_WREN | | 
 | 1572 | 	      MII_TG3_MISC_SHDW_SCR5_SEL | | 
 | 1573 | 	      MII_TG3_MISC_SHDW_SCR5_LPED | | 
 | 1574 | 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM | | 
 | 1575 | 	      MII_TG3_MISC_SHDW_SCR5_SDTL | | 
 | 1576 | 	      MII_TG3_MISC_SHDW_SCR5_C125OE; | 
 | 1577 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) | 
 | 1578 | 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; | 
 | 1579 |  | 
 | 1580 | 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); | 
 | 1581 |  | 
 | 1582 |  | 
 | 1583 | 	reg = MII_TG3_MISC_SHDW_WREN | | 
 | 1584 | 	      MII_TG3_MISC_SHDW_APD_SEL | | 
 | 1585 | 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS; | 
 | 1586 | 	if (enable) | 
 | 1587 | 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE; | 
 | 1588 |  | 
 | 1589 | 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); | 
 | 1590 | } | 
 | 1591 |  | 
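 |  | /* Enable or disable automatic MDI/MDI-X crossover on copper PHYs. */ | 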
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 1592 | static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) | 
 | 1593 | { | 
 | 1594 | 	u32 phy; | 
 | 1595 |  | 
 | 1596 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 
 | 1597 | 	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 
 | 1598 | 		return; | 
 | 1599 |  | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 1600 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 1601 | 		u32 ephy; | 
 | 1602 |  | 
| Matt Carlson | 535ef6e | 2009-08-25 10:09:36 +0000 | [diff] [blame] | 1603 | 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { | 
 | 1604 | 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL; | 
 | 1605 |  | 
 | 1606 | 			tg3_writephy(tp, MII_TG3_FET_TEST, | 
 | 1607 | 				     ephy | MII_TG3_FET_SHADOW_EN); | 
 | 1608 | 			if (!tg3_readphy(tp, reg, &phy)) { | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 1609 | 				if (enable) | 
| Matt Carlson | 535ef6e | 2009-08-25 10:09:36 +0000 | [diff] [blame] | 1610 | 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 1611 | 				else | 
| Matt Carlson | 535ef6e | 2009-08-25 10:09:36 +0000 | [diff] [blame] | 1612 | 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; | 
 | 1613 | 				tg3_writephy(tp, reg, phy); | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 1614 | 			} | 
| Matt Carlson | 535ef6e | 2009-08-25 10:09:36 +0000 | [diff] [blame] | 1615 | 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy); | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 1616 | 		} | 
 | 1617 | 	} else { | 
 | 1618 | 		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC | | 
 | 1619 | 		      MII_TG3_AUXCTL_SHDWSEL_MISC; | 
 | 1620 | 		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) && | 
 | 1621 | 		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) { | 
 | 1622 | 			if (enable) | 
 | 1623 | 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; | 
 | 1624 | 			else | 
 | 1625 | 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; | 
 | 1626 | 			phy |= MII_TG3_AUXCTL_MISC_WREN; | 
 | 1627 | 			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); | 
 | 1628 | 		} | 
 | 1629 | 	} | 
 | 1630 | } | 
 | 1631 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1632 | static void tg3_phy_set_wirespeed(struct tg3 *tp) | 
 | 1633 | { | 
 | 1634 | 	u32 val; | 
 | 1635 |  | 
 | 1636 | 	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) | 
 | 1637 | 		return; | 
 | 1638 |  | 
 | 1639 | 	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) && | 
 | 1640 | 	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val)) | 
 | 1641 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, | 
 | 1642 | 			     (val | (1 << 15) | (1 << 4))); | 
 | 1643 | } | 
 | 1644 |  | 
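 |  | /* Program PHY DSP tuning values derived from the chip's OTP data. */ | 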
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 1645 | static void tg3_phy_apply_otp(struct tg3 *tp) | 
 | 1646 | { | 
 | 1647 | 	u32 otp, phy; | 
 | 1648 |  | 
 | 1649 | 	if (!tp->phy_otp) | 
 | 1650 | 		return; | 
 | 1651 |  | 
 | 1652 | 	otp = tp->phy_otp; | 
 | 1653 |  | 
 | 1654 | 	/* Enable SM_DSP clock and tx 6dB coding. */ | 
 | 1655 | 	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | | 
 | 1656 | 	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA | | 
 | 1657 | 	      MII_TG3_AUXCTL_ACTL_TX_6DB; | 
 | 1658 | 	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); | 
 | 1659 |  | 
 | 1660 | 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); | 
 | 1661 | 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; | 
 | 1662 | 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); | 
 | 1663 |  | 
 | 1664 | 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | | 
 | 1665 | 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); | 
 | 1666 | 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); | 
 | 1667 |  | 
 | 1668 | 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); | 
 | 1669 | 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; | 
 | 1670 | 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); | 
 | 1671 |  | 
 | 1672 | 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); | 
 | 1673 | 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); | 
 | 1674 |  | 
 | 1675 | 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); | 
 | 1676 | 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); | 
 | 1677 |  | 
 | 1678 | 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | | 
 | 1679 | 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); | 
 | 1680 | 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); | 
 | 1681 |  | 
 | 1682 | 	/* Turn off SM_DSP clock. */ | 
 | 1683 | 	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | | 
 | 1684 | 	      MII_TG3_AUXCTL_ACTL_TX_6DB; | 
 | 1685 | 	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); | 
 | 1686 | } | 
 | 1687 |  | 
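 |  | /* Poll PHY register 0x16 until the DSP macro busy bit (0x1000) clears. */ | 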
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | static int tg3_wait_macro_done(struct tg3 *tp) | 
 | 1689 | { | 
 | 1690 | 	int limit = 100; | 
 | 1691 |  | 
 | 1692 | 	while (limit--) { | 
 | 1693 | 		u32 tmp32; | 
 | 1694 |  | 
 | 1695 | 		if (!tg3_readphy(tp, 0x16, &tmp32)) { | 
 | 1696 | 			if ((tmp32 & 0x1000) == 0) | 
 | 1697 | 				break; | 
 | 1698 | 		} | 
 | 1699 | 	} | 
| Roel Kluin | d4675b5 | 2009-02-12 16:33:27 -0800 | [diff] [blame] | 1700 | 	if (limit < 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | 		return -EBUSY; | 
 | 1702 |  | 
 | 1703 | 	return 0; | 
 | 1704 | } | 
 | 1705 |  | 
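 |  | /* Write a test pattern to each of the four DSP channels and verify the | 
 |  |  * readback; flag a PHY reset if any channel fails. | 
 |  |  */ | 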
 | 1706 | static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) | 
 | 1707 | { | 
 | 1708 | 	static const u32 test_pat[4][6] = { | 
 | 1709 | 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, | 
 | 1710 | 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, | 
 | 1711 | 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, | 
 | 1712 | 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } | 
 | 1713 | 	}; | 
 | 1714 | 	int chan; | 
 | 1715 |  | 
 | 1716 | 	for (chan = 0; chan < 4; chan++) { | 
 | 1717 | 		int i; | 
 | 1718 |  | 
 | 1719 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, | 
 | 1720 | 			     (chan * 0x2000) | 0x0200); | 
 | 1721 | 		tg3_writephy(tp, 0x16, 0x0002); | 
 | 1722 |  | 
 | 1723 | 		for (i = 0; i < 6; i++) | 
 | 1724 | 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, | 
 | 1725 | 				     test_pat[chan][i]); | 
 | 1726 |  | 
 | 1727 | 		tg3_writephy(tp, 0x16, 0x0202); | 
 | 1728 | 		if (tg3_wait_macro_done(tp)) { | 
 | 1729 | 			*resetp = 1; | 
 | 1730 | 			return -EBUSY; | 
 | 1731 | 		} | 
 | 1732 |  | 
 | 1733 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, | 
 | 1734 | 			     (chan * 0x2000) | 0x0200); | 
 | 1735 | 		tg3_writephy(tp, 0x16, 0x0082); | 
 | 1736 | 		if (tg3_wait_macro_done(tp)) { | 
 | 1737 | 			*resetp = 1; | 
 | 1738 | 			return -EBUSY; | 
 | 1739 | 		} | 
 | 1740 |  | 
 | 1741 | 		tg3_writephy(tp, 0x16, 0x0802); | 
 | 1742 | 		if (tg3_wait_macro_done(tp)) { | 
 | 1743 | 			*resetp = 1; | 
 | 1744 | 			return -EBUSY; | 
 | 1745 | 		} | 
 | 1746 |  | 
 | 1747 | 		for (i = 0; i < 6; i += 2) { | 
 | 1748 | 			u32 low, high; | 
 | 1749 |  | 
 | 1750 | 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || | 
 | 1751 | 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || | 
 | 1752 | 			    tg3_wait_macro_done(tp)) { | 
 | 1753 | 				*resetp = 1; | 
 | 1754 | 				return -EBUSY; | 
 | 1755 | 			} | 
 | 1756 | 			low &= 0x7fff; | 
 | 1757 | 			high &= 0x000f; | 
 | 1758 | 			if (low != test_pat[chan][i] || | 
 | 1759 | 			    high != test_pat[chan][i+1]) { | 
 | 1760 | 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); | 
 | 1761 | 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); | 
 | 1762 | 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); | 
 | 1763 |  | 
 | 1764 | 				return -EBUSY; | 
 | 1765 | 			} | 
 | 1766 | 		} | 
 | 1767 | 	} | 
 | 1768 |  | 
 | 1769 | 	return 0; | 
 | 1770 | } | 
 | 1771 |  | 
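 |  | /* Clear the test pattern from all four DSP channels. */ | 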
 | 1772 | static int tg3_phy_reset_chanpat(struct tg3 *tp) | 
 | 1773 | { | 
 | 1774 | 	int chan; | 
 | 1775 |  | 
 | 1776 | 	for (chan = 0; chan < 4; chan++) { | 
 | 1777 | 		int i; | 
 | 1778 |  | 
 | 1779 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, | 
 | 1780 | 			     (chan * 0x2000) | 0x0200); | 
 | 1781 | 		tg3_writephy(tp, 0x16, 0x0002); | 
 | 1782 | 		for (i = 0; i < 6; i++) | 
 | 1783 | 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); | 
 | 1784 | 		tg3_writephy(tp, 0x16, 0x0202); | 
 | 1785 | 		if (tg3_wait_macro_done(tp)) | 
 | 1786 | 			return -EBUSY; | 
 | 1787 | 	} | 
 | 1788 |  | 
 | 1789 | 	return 0; | 
 | 1790 | } | 
 | 1791 |  | 
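 |  | /* PHY reset workaround for the 5703/5704/5705: exercise the DSP channels | 
 |  |  * with known test patterns and verify them before restoring normal | 
 |  |  * operation. | 
 |  |  */ | 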
 | 1792 | static int tg3_phy_reset_5703_4_5(struct tg3 *tp) | 
 | 1793 | { | 
 | 1794 | 	u32 reg32, phy9_orig; | 
 | 1795 | 	int retries, do_phy_reset, err; | 
 | 1796 |  | 
 | 1797 | 	retries = 10; | 
 | 1798 | 	do_phy_reset = 1; | 
 | 1799 | 	do { | 
 | 1800 | 		if (do_phy_reset) { | 
 | 1801 | 			err = tg3_bmcr_reset(tp); | 
 | 1802 | 			if (err) | 
 | 1803 | 				return err; | 
 | 1804 | 			do_phy_reset = 0; | 
 | 1805 | 		} | 
 | 1806 |  | 
 | 1807 | 		/* Disable transmitter and interrupt.  */ | 
 | 1808 | 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) | 
 | 1809 | 			continue; | 
 | 1810 |  | 
 | 1811 | 		reg32 |= 0x3000; | 
 | 1812 | 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); | 
 | 1813 |  | 
 | 1814 | 		/* Set full-duplex, 1000 mbps.  */ | 
 | 1815 | 		tg3_writephy(tp, MII_BMCR, | 
 | 1816 | 			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000); | 
 | 1817 |  | 
 | 1818 | 		/* Set to master mode.  */ | 
 | 1819 | 		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig)) | 
 | 1820 | 			continue; | 
 | 1821 |  | 
 | 1822 | 		tg3_writephy(tp, MII_TG3_CTRL, | 
 | 1823 | 			     (MII_TG3_CTRL_AS_MASTER | | 
 | 1824 | 			      MII_TG3_CTRL_ENABLE_AS_MASTER)); | 
 | 1825 |  | 
 | 1826 | 		/* Enable SM_DSP_CLOCK and 6dB.  */ | 
 | 1827 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 
 | 1828 |  | 
 | 1829 | 		/* Block the PHY control access.  */ | 
 | 1830 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); | 
 | 1831 | 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800); | 
 | 1832 |  | 
 | 1833 | 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); | 
 | 1834 | 		if (!err) | 
 | 1835 | 			break; | 
 | 1836 | 	} while (--retries); | 
 | 1837 |  | 
 | 1838 | 	err = tg3_phy_reset_chanpat(tp); | 
 | 1839 | 	if (err) | 
 | 1840 | 		return err; | 
 | 1841 |  | 
 | 1842 | 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005); | 
 | 1843 | 	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000); | 
 | 1844 |  | 
 | 1845 | 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); | 
 | 1846 | 	tg3_writephy(tp, 0x16, 0x0000); | 
 | 1847 |  | 
 | 1848 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 
 | 1849 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 
 | 1850 | 		/* Set Extended packet length bit for jumbo frames */ | 
 | 1851 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); | 
 | 1852 | 	} | 
 | 1853 | 	else { | 
 | 1854 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 
 | 1855 | 	} | 
 | 1856 |  | 
 | 1857 | 	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); | 
 | 1858 |  | 
 | 1859 | 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { | 
 | 1860 | 		reg32 &= ~0x3000; | 
 | 1861 | 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); | 
 | 1862 | 	} else if (!err) | 
 | 1863 | 		err = -EBUSY; | 
 | 1864 |  | 
 | 1865 | 	return err; | 
 | 1866 | } | 
 | 1867 |  | 
 | 1868 | /* Reset the tigon3 PHY and re-apply the chip- and PHY-specific | 
 | 1869 |  * workarounds needed after the reset. | 
 | 1870 |  */ | 
 | 1871 | static int tg3_phy_reset(struct tg3 *tp) | 
 | 1872 | { | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 1873 | 	u32 cpmuctrl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | 	u32 phy_status; | 
 | 1875 | 	int err; | 
 | 1876 |  | 
| Michael Chan | 60189dd | 2006-12-17 17:08:07 -0800 | [diff] [blame] | 1877 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 1878 | 		u32 val; | 
 | 1879 |  | 
 | 1880 | 		val = tr32(GRC_MISC_CFG); | 
 | 1881 | 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); | 
 | 1882 | 		udelay(40); | 
 | 1883 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 | 	err  = tg3_readphy(tp, MII_BMSR, &phy_status); | 
 | 1885 | 	err |= tg3_readphy(tp, MII_BMSR, &phy_status); | 
 | 1886 | 	if (err != 0) | 
 | 1887 | 		return -EBUSY; | 
 | 1888 |  | 
| Michael Chan | c8e1e82 | 2006-04-29 18:55:17 -0700 | [diff] [blame] | 1889 | 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { | 
 | 1890 | 		netif_carrier_off(tp->dev); | 
 | 1891 | 		tg3_link_report(tp); | 
 | 1892 | 	} | 
 | 1893 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1894 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 
 | 1895 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 
 | 1896 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 
 | 1897 | 		err = tg3_phy_reset_5703_4_5(tp); | 
 | 1898 | 		if (err) | 
 | 1899 | 			return err; | 
 | 1900 | 		goto out; | 
 | 1901 | 	} | 
 | 1902 |  | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 1903 | 	cpmuctrl = 0; | 
 | 1904 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 
 | 1905 | 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { | 
 | 1906 | 		cpmuctrl = tr32(TG3_CPMU_CTRL); | 
 | 1907 | 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) | 
 | 1908 | 			tw32(TG3_CPMU_CTRL, | 
 | 1909 | 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); | 
 | 1910 | 	} | 
 | 1911 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 | 	err = tg3_bmcr_reset(tp); | 
 | 1913 | 	if (err) | 
 | 1914 | 		return err; | 
 | 1915 |  | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 1916 | 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { | 
 | 1917 | 		u32 phy; | 
 | 1918 |  | 
 | 1919 | 		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; | 
 | 1920 | 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy); | 
 | 1921 |  | 
 | 1922 | 		tw32(TG3_CPMU_CTRL, cpmuctrl); | 
 | 1923 | 	} | 
 | 1924 |  | 
| Matt Carlson | bcb37f6 | 2008-11-03 16:52:09 -0800 | [diff] [blame] | 1925 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || | 
 | 1926 | 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { | 
| Matt Carlson | ce057f0 | 2007-11-12 21:08:03 -0800 | [diff] [blame] | 1927 | 		u32 val; | 
 | 1928 |  | 
 | 1929 | 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK); | 
 | 1930 | 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == | 
 | 1931 | 		    CPMU_LSPD_1000MB_MACCLK_12_5) { | 
 | 1932 | 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; | 
 | 1933 | 			udelay(40); | 
 | 1934 | 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); | 
 | 1935 | 		} | 
 | 1936 | 	} | 
 | 1937 |  | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 1938 | 	tg3_phy_apply_otp(tp); | 
 | 1939 |  | 
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 1940 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) | 
 | 1941 | 		tg3_phy_toggle_apd(tp, true); | 
 | 1942 | 	else | 
 | 1943 | 		tg3_phy_toggle_apd(tp, false); | 
 | 1944 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | out: | 
 | 1946 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { | 
 | 1947 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 
 | 1948 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); | 
 | 1949 | 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa); | 
 | 1950 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 
 | 1951 | 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323); | 
 | 1952 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 
 | 1953 | 	} | 
 | 1954 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) { | 
 | 1955 | 		tg3_writephy(tp, 0x1c, 0x8d68); | 
 | 1956 | 		tg3_writephy(tp, 0x1c, 0x8d68); | 
 | 1957 | 	} | 
 | 1958 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) { | 
 | 1959 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 
 | 1960 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 
 | 1961 | 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b); | 
 | 1962 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); | 
 | 1963 | 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506); | 
 | 1964 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); | 
 | 1965 | 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); | 
 | 1966 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 
 | 1967 | 	} | 
| Michael Chan | c424cb2 | 2006-04-29 18:56:34 -0700 | [diff] [blame] | 1968 | 	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { | 
 | 1969 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); | 
 | 1970 | 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); | 
| Michael Chan | c1d2a19 | 2007-01-08 19:57:20 -0800 | [diff] [blame] | 1971 | 		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { | 
 | 1972 | 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); | 
 | 1973 | 			tg3_writephy(tp, MII_TG3_TEST1, | 
 | 1974 | 				     MII_TG3_TEST1_TRIM_EN | 0x4); | 
 | 1975 | 		} else | 
 | 1976 | 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); | 
| Michael Chan | c424cb2 | 2006-04-29 18:56:34 -0700 | [diff] [blame] | 1977 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); | 
 | 1978 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1979 | 	/* Set Extended packet length bit (bit 14) on all chips that */ | 
 | 1980 | 	/* support jumbo frames */ | 
 | 1981 | 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 
 | 1982 | 		/* Cannot do read-modify-write on 5401 */ | 
 | 1983 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); | 
| Matt Carlson | 8f666b0 | 2009-08-28 13:58:24 +0000 | [diff] [blame] | 1984 | 	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1985 | 		u32 phy_reg; | 
 | 1986 |  | 
 | 1987 | 		/* Set bit 14 with read-modify-write to preserve other bits */ | 
 | 1988 | 		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && | 
 | 1989 | 		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) | 
 | 1990 | 			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); | 
 | 1991 | 	} | 
 | 1992 |  | 
 | 1993 | 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support | 
 | 1994 | 	 * jumbo frame transmission. | 
 | 1995 | 	 */ | 
| Matt Carlson | 8f666b0 | 2009-08-28 13:58:24 +0000 | [diff] [blame] | 1996 | 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1997 | 		u32 phy_reg; | 
 | 1998 |  | 
 | 1999 | 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) | 
 | 2000 | 			tg3_writephy(tp, MII_TG3_EXT_CTRL, | 
 | 2001 | 				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); | 
 | 2002 | 	} | 
 | 2003 |  | 
| Michael Chan | 715116a | 2006-09-27 16:09:25 -0700 | [diff] [blame] | 2004 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
| Michael Chan | 715116a | 2006-09-27 16:09:25 -0700 | [diff] [blame] | 2005 | 		/* adjust output voltage */ | 
| Matt Carlson | 535ef6e | 2009-08-25 10:09:36 +0000 | [diff] [blame] | 2006 | 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); | 
| Michael Chan | 715116a | 2006-09-27 16:09:25 -0700 | [diff] [blame] | 2007 | 	} | 
 | 2008 |  | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 2009 | 	tg3_phy_toggle_automdix(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2010 | 	tg3_phy_set_wirespeed(tp); | 
 | 2011 | 	return 0; | 
 | 2012 | } | 
 | 2013 |  | 
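 |  | /* Drive the GPIO-controlled auxiliary power, taking the peer function's | 
 |  |  * WOL/ASF state into account on dual-port devices. | 
 |  |  */ | 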
 | 2014 | static void tg3_frob_aux_power(struct tg3 *tp) | 
 | 2015 | { | 
 | 2016 | 	struct tg3 *tp_peer = tp; | 
 | 2017 |  | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 2018 | 	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2019 | 		return; | 
 | 2020 |  | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 2021 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 
 | 2022 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || | 
 | 2023 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 
| Michael Chan | 8c2dc7e | 2005-12-19 16:26:02 -0800 | [diff] [blame] | 2024 | 		struct net_device *dev_peer; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 |  | 
| Michael Chan | 8c2dc7e | 2005-12-19 16:26:02 -0800 | [diff] [blame] | 2026 | 		dev_peer = pci_get_drvdata(tp->pdev_peer); | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2027 | 		/* remove_one() may have been run on the peer. */ | 
| Michael Chan | 8c2dc7e | 2005-12-19 16:26:02 -0800 | [diff] [blame] | 2028 | 		if (!dev_peer) | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2029 | 			tp_peer = tp; | 
 | 2030 | 		else | 
 | 2031 | 			tp_peer = netdev_priv(dev_peer); | 
| Michael Chan | 8c2dc7e | 2005-12-19 16:26:02 -0800 | [diff] [blame] | 2032 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2033 |  | 
 | 2034 | 	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 2035 | 	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || | 
 | 2036 | 	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || | 
 | 2037 | 	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2038 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 2039 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2040 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2041 | 				    (GRC_LCLCTRL_GPIO_OE0 | | 
 | 2042 | 				     GRC_LCLCTRL_GPIO_OE1 | | 
 | 2043 | 				     GRC_LCLCTRL_GPIO_OE2 | | 
 | 2044 | 				     GRC_LCLCTRL_GPIO_OUTPUT0 | | 
 | 2045 | 				     GRC_LCLCTRL_GPIO_OUTPUT1), | 
 | 2046 | 				    100); | 
| Matt Carlson | 8d519ab | 2009-04-20 06:58:01 +0000 | [diff] [blame] | 2047 | 		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || | 
 | 2048 | 			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { | 
| Matt Carlson | 5f0c4a3 | 2008-06-09 15:41:12 -0700 | [diff] [blame] | 2049 | 			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */ | 
 | 2050 | 			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | | 
 | 2051 | 					     GRC_LCLCTRL_GPIO_OE1 | | 
 | 2052 | 					     GRC_LCLCTRL_GPIO_OE2 | | 
 | 2053 | 					     GRC_LCLCTRL_GPIO_OUTPUT0 | | 
 | 2054 | 					     GRC_LCLCTRL_GPIO_OUTPUT1 | | 
 | 2055 | 					     tp->grc_local_ctrl; | 
 | 2056 | 			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | 
 | 2057 |  | 
 | 2058 | 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; | 
 | 2059 | 			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | 
 | 2060 |  | 
 | 2061 | 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; | 
 | 2062 | 			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 | 		} else { | 
 | 2064 | 			u32 no_gpio2; | 
| Michael Chan | dc56b7d | 2005-12-19 16:26:28 -0800 | [diff] [blame] | 2065 | 			u32 grc_local_ctrl = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2066 |  | 
 | 2067 | 			if (tp_peer != tp && | 
 | 2068 | 			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) | 
 | 2069 | 				return; | 
 | 2070 |  | 
| Michael Chan | dc56b7d | 2005-12-19 16:26:28 -0800 | [diff] [blame] | 2071 | 			/* Workaround to prevent overdrawing Amps. */ | 
 | 2072 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 
 | 2073 | 			    ASIC_REV_5714) { | 
 | 2074 | 				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2075 | 				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2076 | 					    grc_local_ctrl, 100); | 
| Michael Chan | dc56b7d | 2005-12-19 16:26:28 -0800 | [diff] [blame] | 2077 | 			} | 
 | 2078 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2079 | 			/* On 5753 and variants, GPIO2 cannot be used. */ | 
 | 2080 | 			no_gpio2 = tp->nic_sram_data_cfg & | 
 | 2081 | 				    NIC_SRAM_DATA_CFG_NO_GPIO2; | 
 | 2082 |  | 
| Michael Chan | dc56b7d | 2005-12-19 16:26:28 -0800 | [diff] [blame] | 2083 | 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2084 | 					 GRC_LCLCTRL_GPIO_OE1 | | 
 | 2085 | 					 GRC_LCLCTRL_GPIO_OE2 | | 
 | 2086 | 					 GRC_LCLCTRL_GPIO_OUTPUT1 | | 
 | 2087 | 					 GRC_LCLCTRL_GPIO_OUTPUT2; | 
 | 2088 | 			if (no_gpio2) { | 
 | 2089 | 				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | | 
 | 2090 | 						    GRC_LCLCTRL_GPIO_OUTPUT2); | 
 | 2091 | 			} | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2092 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2093 | 						    grc_local_ctrl, 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 |  | 
 | 2095 | 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; | 
 | 2096 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2097 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2098 | 						    grc_local_ctrl, 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 |  | 
 | 2100 | 			if (!no_gpio2) { | 
 | 2101 | 				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2102 | 				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2103 | 					    grc_local_ctrl, 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2104 | 			} | 
 | 2105 | 		} | 
 | 2106 | 	} else { | 
 | 2107 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 
 | 2108 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { | 
 | 2109 | 			if (tp_peer != tp && | 
 | 2110 | 			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) | 
 | 2111 | 				return; | 
 | 2112 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2113 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2114 | 				    (GRC_LCLCTRL_GPIO_OE1 | | 
 | 2115 | 				     GRC_LCLCTRL_GPIO_OUTPUT1), 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2117 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2118 | 				    GRC_LCLCTRL_GPIO_OE1, 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2119 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2120 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | | 
 | 2121 | 				    (GRC_LCLCTRL_GPIO_OE1 | | 
 | 2122 | 				     GRC_LCLCTRL_GPIO_OUTPUT1), 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2123 | 		} | 
 | 2124 | 	} | 
 | 2125 | } | 
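/* A note on tg3_frob_aux_power() above (summary, not from the original
 * source): on NIC (non-LOM) boards it sequences the GRC local-control GPIOs
 * that select auxiliary power.  Aux power is kept up when either this port
 * or its peer on a 5704/5714/5717 needs it for WOL or ASF; when neither
 * port needs it, the same GPIOs are sequenced the other way to release
 * auxiliary power.
 */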
 | 2126 |  | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 2127 | static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) | 
 | 2128 | { | 
 | 2129 | 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) | 
 | 2130 | 		return 1; | 
 | 2131 | 	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) { | 
 | 2132 | 		if (speed != SPEED_10) | 
 | 2133 | 			return 1; | 
 | 2134 | 	} else if (speed == SPEED_10) | 
 | 2135 | 		return 1; | 
 | 2136 |  | 
 | 2137 | 	return 0; | 
 | 2138 | } | 
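/* tg3_5700_link_polarity() above returns nonzero when MAC_MODE_LINK_POLARITY
 * should be set: always for LED mode PHY_2, for a BCM5411 PHY at any speed
 * other than 10 Mb/s, and for every other PHY only at 10 Mb/s.
 */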
 | 2139 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2140 | static int tg3_setup_phy(struct tg3 *, int); | 
 | 2141 |  | 
 | 2142 | #define RESET_KIND_SHUTDOWN	0 | 
 | 2143 | #define RESET_KIND_INIT		1 | 
 | 2144 | #define RESET_KIND_SUSPEND	2 | 
 | 2145 |  | 
 | 2146 | static void tg3_write_sig_post_reset(struct tg3 *, int); | 
 | 2147 | static int tg3_halt_cpu(struct tg3 *, u32); | 
 | 2148 |  | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2149 | static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) | 
| Michael Chan | 15c3b69 | 2006-03-22 01:06:52 -0800 | [diff] [blame] | 2150 | { | 
| Matt Carlson | ce057f0 | 2007-11-12 21:08:03 -0800 | [diff] [blame] | 2151 | 	u32 val; | 
 | 2152 |  | 
| Michael Chan | 5129724 | 2007-02-13 12:17:57 -0800 | [diff] [blame] | 2153 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 
 | 2154 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 
 | 2155 | 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); | 
 | 2156 | 			u32 serdes_cfg = tr32(MAC_SERDES_CFG); | 
 | 2157 |  | 
 | 2158 | 			sg_dig_ctrl |= | 
 | 2159 | 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; | 
 | 2160 | 			tw32(SG_DIG_CTRL, sg_dig_ctrl); | 
 | 2161 | 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); | 
 | 2162 | 		} | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 2163 | 		return; | 
| Michael Chan | 5129724 | 2007-02-13 12:17:57 -0800 | [diff] [blame] | 2164 | 	} | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 2165 |  | 
| Michael Chan | 60189dd | 2006-12-17 17:08:07 -0800 | [diff] [blame] | 2166 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
| Michael Chan | 60189dd | 2006-12-17 17:08:07 -0800 | [diff] [blame] | 2167 | 		tg3_bmcr_reset(tp); | 
 | 2168 | 		val = tr32(GRC_MISC_CFG); | 
 | 2169 | 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); | 
 | 2170 | 		udelay(40); | 
 | 2171 | 		return; | 
| Matt Carlson | 0e5f784 | 2009-11-02 14:26:38 +0000 | [diff] [blame] | 2172 | 	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 
 | 2173 | 		u32 phytest; | 
 | 2174 | 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { | 
 | 2175 | 			u32 phy; | 
 | 2176 |  | 
 | 2177 | 			tg3_writephy(tp, MII_ADVERTISE, 0); | 
 | 2178 | 			tg3_writephy(tp, MII_BMCR, | 
 | 2179 | 				     BMCR_ANENABLE | BMCR_ANRESTART); | 
 | 2180 |  | 
 | 2181 | 			tg3_writephy(tp, MII_TG3_FET_TEST, | 
 | 2182 | 				     phytest | MII_TG3_FET_SHADOW_EN); | 
 | 2183 | 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { | 
 | 2184 | 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; | 
 | 2185 | 				tg3_writephy(tp, | 
 | 2186 | 					     MII_TG3_FET_SHDW_AUXMODE4, | 
 | 2187 | 					     phy); | 
 | 2188 | 			} | 
 | 2189 | 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest); | 
 | 2190 | 		} | 
 | 2191 | 		return; | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2192 | 	} else if (do_low_power) { | 
| Michael Chan | 715116a | 2006-09-27 16:09:25 -0700 | [diff] [blame] | 2193 | 		tg3_writephy(tp, MII_TG3_EXT_CTRL, | 
 | 2194 | 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF); | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2195 |  | 
 | 2196 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, | 
 | 2197 | 			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL | | 
 | 2198 | 			     MII_TG3_AUXCTL_PCTL_100TX_LPWR | | 
 | 2199 | 			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | | 
 | 2200 | 			     MII_TG3_AUXCTL_PCTL_VREG_11V); | 
| Michael Chan | 715116a | 2006-09-27 16:09:25 -0700 | [diff] [blame] | 2201 | 	} | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 2202 |  | 
| Michael Chan | 15c3b69 | 2006-03-22 01:06:52 -0800 | [diff] [blame] | 2203 | 	/* The PHY should not be powered down on some chips because | 
 | 2204 | 	 * of bugs. | 
 | 2205 | 	 */ | 
 | 2206 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 2207 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 
 | 2208 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && | 
 | 2209 | 	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) | 
 | 2210 | 		return; | 
| Matt Carlson | ce057f0 | 2007-11-12 21:08:03 -0800 | [diff] [blame] | 2211 |  | 
| Matt Carlson | bcb37f6 | 2008-11-03 16:52:09 -0800 | [diff] [blame] | 2212 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || | 
 | 2213 | 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { | 
| Matt Carlson | ce057f0 | 2007-11-12 21:08:03 -0800 | [diff] [blame] | 2214 | 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK); | 
 | 2215 | 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; | 
 | 2216 | 		val |= CPMU_LSPD_1000MB_MACCLK_12_5; | 
 | 2217 | 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); | 
 | 2218 | 	} | 
 | 2219 |  | 
| Michael Chan | 15c3b69 | 2006-03-22 01:06:52 -0800 | [diff] [blame] | 2220 | 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); | 
 | 2221 | } | 
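/* tg3_power_down_phy() above picks one of several power-down strategies:
 * SERDES parts (with extra MAC_SERDES_CFG work on the 5704) skip the copper
 * PHY handling entirely, the 5906 puts its internal EPHY into IDDQ, FET-style
 * PHYs enable standby power-down (SBPD) through the shadow AUXMODE4 register,
 * and other copper PHYs may first force the LEDs off and drop the AUX_CTRL
 * power-control settings when do_low_power is set.  BMCR_PDOWN is only
 * written at the very end, and is skipped on the chips listed above it where
 * powering the PHY down is known to be buggy.
 */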
 | 2222 |  | 
| Matt Carlson | 3f00789 | 2008-11-03 16:51:36 -0800 | [diff] [blame] | 2223 | /* tp->lock is held. */ | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2224 | static int tg3_nvram_lock(struct tg3 *tp) | 
 | 2225 | { | 
 | 2226 | 	if (tp->tg3_flags & TG3_FLAG_NVRAM) { | 
 | 2227 | 		int i; | 
 | 2228 |  | 
 | 2229 | 		if (tp->nvram_lock_cnt == 0) { | 
 | 2230 | 			tw32(NVRAM_SWARB, SWARB_REQ_SET1); | 
 | 2231 | 			for (i = 0; i < 8000; i++) { | 
 | 2232 | 				if (tr32(NVRAM_SWARB) & SWARB_GNT1) | 
 | 2233 | 					break; | 
 | 2234 | 				udelay(20); | 
 | 2235 | 			} | 
 | 2236 | 			if (i == 8000) { | 
 | 2237 | 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1); | 
 | 2238 | 				return -ENODEV; | 
 | 2239 | 			} | 
 | 2240 | 		} | 
 | 2241 | 		tp->nvram_lock_cnt++; | 
 | 2242 | 	} | 
 | 2243 | 	return 0; | 
 | 2244 | } | 
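/* tg3_nvram_lock() above grabs the NVRAM software arbitration semaphore
 * (SWARB_REQ_SET1/SWARB_GNT1), polling up to 8000 * 20 us = 160 ms before
 * giving up with -ENODEV.  The nvram_lock_cnt counter makes the lock
 * recursive, so nested lock/unlock pairs under tp->lock are harmless.
 */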
 | 2245 |  | 
 | 2246 | /* tp->lock is held. */ | 
 | 2247 | static void tg3_nvram_unlock(struct tg3 *tp) | 
 | 2248 | { | 
 | 2249 | 	if (tp->tg3_flags & TG3_FLAG_NVRAM) { | 
 | 2250 | 		if (tp->nvram_lock_cnt > 0) | 
 | 2251 | 			tp->nvram_lock_cnt--; | 
 | 2252 | 		if (tp->nvram_lock_cnt == 0) | 
 | 2253 | 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); | 
 | 2254 | 	} | 
 | 2255 | } | 
 | 2256 |  | 
 | 2257 | /* tp->lock is held. */ | 
 | 2258 | static void tg3_enable_nvram_access(struct tg3 *tp) | 
 | 2259 | { | 
 | 2260 | 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 
| Matt Carlson | f66a29b | 2009-11-13 13:03:36 +0000 | [diff] [blame] | 2261 | 	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2262 | 		u32 nvaccess = tr32(NVRAM_ACCESS); | 
 | 2263 |  | 
 | 2264 | 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); | 
 | 2265 | 	} | 
 | 2266 | } | 
 | 2267 |  | 
 | 2268 | /* tp->lock is held. */ | 
 | 2269 | static void tg3_disable_nvram_access(struct tg3 *tp) | 
 | 2270 | { | 
 | 2271 | 	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 
| Matt Carlson | f66a29b | 2009-11-13 13:03:36 +0000 | [diff] [blame] | 2272 | 	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2273 | 		u32 nvaccess = tr32(NVRAM_ACCESS); | 
 | 2274 |  | 
 | 2275 | 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); | 
 | 2276 | 	} | 
 | 2277 | } | 
 | 2278 |  | 
 | 2279 | static int tg3_nvram_read_using_eeprom(struct tg3 *tp, | 
 | 2280 | 					u32 offset, u32 *val) | 
 | 2281 | { | 
 | 2282 | 	u32 tmp; | 
 | 2283 | 	int i; | 
 | 2284 |  | 
 | 2285 | 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) | 
 | 2286 | 		return -EINVAL; | 
 | 2287 |  | 
 | 2288 | 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | | 
 | 2289 | 					EEPROM_ADDR_DEVID_MASK | | 
 | 2290 | 					EEPROM_ADDR_READ); | 
 | 2291 | 	tw32(GRC_EEPROM_ADDR, | 
 | 2292 | 	     tmp | | 
 | 2293 | 	     (0 << EEPROM_ADDR_DEVID_SHIFT) | | 
 | 2294 | 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) & | 
 | 2295 | 	      EEPROM_ADDR_ADDR_MASK) | | 
 | 2296 | 	     EEPROM_ADDR_READ | EEPROM_ADDR_START); | 
 | 2297 |  | 
 | 2298 | 	for (i = 0; i < 1000; i++) { | 
 | 2299 | 		tmp = tr32(GRC_EEPROM_ADDR); | 
 | 2300 |  | 
 | 2301 | 		if (tmp & EEPROM_ADDR_COMPLETE) | 
 | 2302 | 			break; | 
 | 2303 | 		msleep(1); | 
 | 2304 | 	} | 
 | 2305 | 	if (!(tmp & EEPROM_ADDR_COMPLETE)) | 
 | 2306 | 		return -EBUSY; | 
 | 2307 |  | 
| Matt Carlson | 62cedd1 | 2009-04-20 14:52:29 -0700 | [diff] [blame] | 2308 | 	tmp = tr32(GRC_EEPROM_DATA); | 
 | 2309 |  | 
 | 2310 | 	/* | 
 | 2311 | 	 * The data will always be opposite the native endian | 
 | 2312 | 	 * format.  Perform a blind byteswap to compensate. | 
 | 2313 | 	 */ | 
 | 2314 | 	*val = swab32(tmp); | 
 | 2315 |  | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2316 | 	return 0; | 
 | 2317 | } | 
 | 2318 |  | 
 | 2319 | #define NVRAM_CMD_TIMEOUT 10000 | 
 | 2320 |  | 
 | 2321 | static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) | 
 | 2322 | { | 
 | 2323 | 	int i; | 
 | 2324 |  | 
 | 2325 | 	tw32(NVRAM_CMD, nvram_cmd); | 
 | 2326 | 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { | 
 | 2327 | 		udelay(10); | 
 | 2328 | 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { | 
 | 2329 | 			udelay(10); | 
 | 2330 | 			break; | 
 | 2331 | 		} | 
 | 2332 | 	} | 
 | 2333 |  | 
 | 2334 | 	if (i == NVRAM_CMD_TIMEOUT) | 
 | 2335 | 		return -EBUSY; | 
 | 2336 |  | 
 | 2337 | 	return 0; | 
 | 2338 | } | 
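/* tg3_nvram_exec_cmd() above kicks a command into NVRAM_CMD and polls for
 * NVRAM_CMD_DONE, giving the controller up to NVRAM_CMD_TIMEOUT * 10 us
 * (100 ms) before returning -EBUSY.
 */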
 | 2339 |  | 
 | 2340 | static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) | 
 | 2341 | { | 
 | 2342 | 	if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | 
 | 2343 | 	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 
 | 2344 | 	    (tp->tg3_flags2 & TG3_FLG2_FLASH) && | 
 | 2345 | 	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && | 
 | 2346 | 	    (tp->nvram_jedecnum == JEDEC_ATMEL)) | 
 | 2347 |  | 
 | 2348 | 		addr = ((addr / tp->nvram_pagesize) << | 
 | 2349 | 			ATMEL_AT45DB0X1B_PAGE_POS) + | 
 | 2350 | 		       (addr % tp->nvram_pagesize); | 
 | 2351 |  | 
 | 2352 | 	return addr; | 
 | 2353 | } | 
 | 2354 |  | 
 | 2355 | static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) | 
 | 2356 | { | 
 | 2357 | 	if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | 
 | 2358 | 	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 
 | 2359 | 	    (tp->tg3_flags2 & TG3_FLG2_FLASH) && | 
 | 2360 | 	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && | 
 | 2361 | 	    (tp->nvram_jedecnum == JEDEC_ATMEL)) | 
 | 2362 |  | 
 | 2363 | 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * | 
 | 2364 | 			tp->nvram_pagesize) + | 
 | 2365 | 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); | 
 | 2366 |  | 
 | 2367 | 	return addr; | 
 | 2368 | } | 
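/* The two helpers above translate between a linear NVRAM offset and the
 * page:offset form used by buffered Atmel parts.  As a worked example,
 * assuming the usual 264-byte page size and ATMEL_AT45DB0X1B_PAGE_POS == 9
 * (both configured elsewhere in this driver):
 *
 *	linear 1000 -> page 1000 / 264 = 3, byte 1000 % 264 = 208
 *	physical    = (3 << 9) + 208 = 1744
 *	and back    : (1744 >> 9) * 264 + (1744 & 0x1ff) = 1000
 *
 * For other configurations (non-Atmel parts, unbuffered NVRAM, or
 * TG3_FLG3_NO_NVRAM_ADDR_TRANS set) the offset is passed through unchanged.
 */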
 | 2369 |  | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 2370 | /* NOTE: Data read in from NVRAM is byteswapped according to | 
 | 2371 |  * the byteswapping settings for all other register accesses. | 
 | 2372 |  * tg3 devices are BE devices, so on a BE machine, the data | 
 | 2373 |  * returned will be exactly as it is seen in NVRAM.  On a LE | 
 | 2374 |  * machine, the 32-bit value will be byteswapped. | 
 | 2375 |  */ | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2376 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | 
 | 2377 | { | 
 | 2378 | 	int ret; | 
 | 2379 |  | 
 | 2380 | 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) | 
 | 2381 | 		return tg3_nvram_read_using_eeprom(tp, offset, val); | 
 | 2382 |  | 
 | 2383 | 	offset = tg3_nvram_phys_addr(tp, offset); | 
 | 2384 |  | 
 | 2385 | 	if (offset > NVRAM_ADDR_MSK) | 
 | 2386 | 		return -EINVAL; | 
 | 2387 |  | 
 | 2388 | 	ret = tg3_nvram_lock(tp); | 
 | 2389 | 	if (ret) | 
 | 2390 | 		return ret; | 
 | 2391 |  | 
 | 2392 | 	tg3_enable_nvram_access(tp); | 
 | 2393 |  | 
 | 2394 | 	tw32(NVRAM_ADDR, offset); | 
 | 2395 | 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | | 
 | 2396 | 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); | 
 | 2397 |  | 
 | 2398 | 	if (ret == 0) | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 2399 | 		*val = tr32(NVRAM_RDDATA); | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2400 |  | 
 | 2401 | 	tg3_disable_nvram_access(tp); | 
 | 2402 |  | 
 | 2403 | 	tg3_nvram_unlock(tp); | 
 | 2404 |  | 
 | 2405 | 	return ret; | 
 | 2406 | } | 
 | 2407 |  | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 2408 | /* Ensures NVRAM data is in bytestream format. */ | 
 | 2409 | static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2410 | { | 
 | 2411 | 	u32 v; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 2412 | 	int res = tg3_nvram_read(tp, offset, &v); | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2413 | 	if (!res) | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 2414 | 		*val = cpu_to_be32(v); | 
| Matt Carlson | ffbcfed | 2009-02-25 14:24:28 +0000 | [diff] [blame] | 2415 | 	return res; | 
 | 2416 | } | 
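/* Minimal usage sketch (illustrative only, not part of the driver): read a
 * few consecutive words in NVRAM byte order.  Callers are expected to hold
 * tp->lock, as noted on tg3_nvram_lock()/tg3_nvram_unlock() above; the be32
 * variant is the one to use when the bytes themselves matter (MAC addresses,
 * version strings) rather than the 32-bit value.
 */
#if 0
/* Hypothetical helper, never called by the driver. */
static int tg3_nvram_dump_start(struct tg3 *tp, __be32 *buf, int words)
{
	int i, err = 0;

	for (i = 0; i < words && !err; i++)
		err = tg3_nvram_read_be32(tp, i * 4, &buf[i]);

	return err;
}
#endif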
 | 2417 |  | 
 | 2418 | /* tp->lock is held. */ | 
| Matt Carlson | 3f00789 | 2008-11-03 16:51:36 -0800 | [diff] [blame] | 2419 | static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) | 
 | 2420 | { | 
 | 2421 | 	u32 addr_high, addr_low; | 
 | 2422 | 	int i; | 
 | 2423 |  | 
 | 2424 | 	addr_high = ((tp->dev->dev_addr[0] << 8) | | 
 | 2425 | 		     tp->dev->dev_addr[1]); | 
 | 2426 | 	addr_low = ((tp->dev->dev_addr[2] << 24) | | 
 | 2427 | 		    (tp->dev->dev_addr[3] << 16) | | 
 | 2428 | 		    (tp->dev->dev_addr[4] <<  8) | | 
 | 2429 | 		    (tp->dev->dev_addr[5] <<  0)); | 
 | 2430 | 	for (i = 0; i < 4; i++) { | 
 | 2431 | 		if (i == 1 && skip_mac_1) | 
 | 2432 | 			continue; | 
 | 2433 | 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); | 
 | 2434 | 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); | 
 | 2435 | 	} | 
 | 2436 |  | 
 | 2437 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 
 | 2438 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 
 | 2439 | 		for (i = 0; i < 12; i++) { | 
 | 2440 | 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); | 
 | 2441 | 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); | 
 | 2442 | 		} | 
 | 2443 | 	} | 
 | 2444 |  | 
 | 2445 | 	addr_high = (tp->dev->dev_addr[0] + | 
 | 2446 | 		     tp->dev->dev_addr[1] + | 
 | 2447 | 		     tp->dev->dev_addr[2] + | 
 | 2448 | 		     tp->dev->dev_addr[3] + | 
 | 2449 | 		     tp->dev->dev_addr[4] + | 
 | 2450 | 		     tp->dev->dev_addr[5]) & | 
 | 2451 | 		TX_BACKOFF_SEED_MASK; | 
 | 2452 | 	tw32(MAC_TX_BACKOFF_SEED, addr_high); | 
 | 2453 | } | 
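/* __tg3_set_mac_addr() above splits the station address across two
 * registers.  For an example address of 00:10:18:aa:bb:cc:
 *
 *	addr_high = (0x00 << 8) | 0x10                        = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) |
 *	            (0xbb << 8)  | 0xcc                        = 0x18aabbcc
 *
 * The pair is written to the four MAC_ADDR slots (and twelve additional
 * MAC_EXTADDR slots on 5703/5704), except that slot 1 is left alone when
 * skip_mac_1 is set, and the byte sum seeds the transmit backoff generator.
 */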
 | 2454 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2455 | static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2456 | { | 
 | 2457 | 	u32 misc_host_ctrl; | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2458 | 	bool device_should_wake, do_low_power; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2459 |  | 
 | 2460 | 	/* Make sure register accesses (indirect or otherwise) | 
 | 2461 | 	 * will function correctly. | 
 | 2462 | 	 */ | 
 | 2463 | 	pci_write_config_dword(tp->pdev, | 
 | 2464 | 			       TG3PCI_MISC_HOST_CTRL, | 
 | 2465 | 			       tp->misc_host_ctrl); | 
 | 2466 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2467 | 	switch (state) { | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2468 | 	case PCI_D0: | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 2469 | 		pci_enable_wake(tp->pdev, state, false); | 
 | 2470 | 		pci_set_power_state(tp->pdev, PCI_D0); | 
| Michael Chan | 8c6bda1 | 2005-04-21 17:09:08 -0700 | [diff] [blame] | 2471 |  | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 2472 | 		/* Switch out of Vaux if it is a NIC */ | 
 | 2473 | 		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2474 | 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2475 |  | 
 | 2476 | 		return 0; | 
 | 2477 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2478 | 	case PCI_D1: | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2479 | 	case PCI_D2: | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 2480 | 	case PCI_D3hot: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2481 | 		break; | 
 | 2482 |  | 
 | 2483 | 	default: | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 2484 | 		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n", | 
 | 2485 | 			tp->dev->name, state); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2486 | 		return -EINVAL; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 2487 | 	} | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 2488 |  | 
 | 2489 | 	/* Restore the CLKREQ setting. */ | 
 | 2490 | 	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { | 
 | 2491 | 		u16 lnkctl; | 
 | 2492 |  | 
 | 2493 | 		pci_read_config_word(tp->pdev, | 
 | 2494 | 				     tp->pcie_cap + PCI_EXP_LNKCTL, | 
 | 2495 | 				     &lnkctl); | 
 | 2496 | 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; | 
 | 2497 | 		pci_write_config_word(tp->pdev, | 
 | 2498 | 				      tp->pcie_cap + PCI_EXP_LNKCTL, | 
 | 2499 | 				      lnkctl); | 
 | 2500 | 	} | 
 | 2501 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2502 | 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); | 
 | 2503 | 	tw32(TG3PCI_MISC_HOST_CTRL, | 
 | 2504 | 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); | 
 | 2505 |  | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 2506 | 	device_should_wake = pci_pme_capable(tp->pdev, state) && | 
 | 2507 | 			     device_may_wakeup(&tp->pdev->dev) && | 
 | 2508 | 			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); | 
 | 2509 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 2510 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2511 | 		do_low_power = false; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 2512 | 		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && | 
 | 2513 | 		    !tp->link_config.phy_is_low_power) { | 
 | 2514 | 			struct phy_device *phydev; | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2515 | 			u32 phyid, advertising; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 2516 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 2517 | 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 2518 |  | 
 | 2519 | 			tp->link_config.phy_is_low_power = 1; | 
 | 2520 |  | 
 | 2521 | 			tp->link_config.orig_speed = phydev->speed; | 
 | 2522 | 			tp->link_config.orig_duplex = phydev->duplex; | 
 | 2523 | 			tp->link_config.orig_autoneg = phydev->autoneg; | 
 | 2524 | 			tp->link_config.orig_advertising = phydev->advertising; | 
 | 2525 |  | 
 | 2526 | 			advertising = ADVERTISED_TP | | 
 | 2527 | 				      ADVERTISED_Pause | | 
 | 2528 | 				      ADVERTISED_Autoneg | | 
 | 2529 | 				      ADVERTISED_10baseT_Half; | 
 | 2530 |  | 
 | 2531 | 			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 2532 | 			    device_should_wake) { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 2533 | 				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) | 
 | 2534 | 					advertising |= | 
 | 2535 | 						ADVERTISED_100baseT_Half | | 
 | 2536 | 						ADVERTISED_100baseT_Full | | 
 | 2537 | 						ADVERTISED_10baseT_Full; | 
 | 2538 | 				else | 
 | 2539 | 					advertising |= ADVERTISED_10baseT_Full; | 
 | 2540 | 			} | 
 | 2541 |  | 
 | 2542 | 			phydev->advertising = advertising; | 
 | 2543 |  | 
 | 2544 | 			phy_start_aneg(phydev); | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2545 |  | 
 | 2546 | 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; | 
 | 2547 | 			if (phyid != TG3_PHY_ID_BCMAC131) { | 
 | 2548 | 				phyid &= TG3_PHY_OUI_MASK; | 
| Roel Kluin | f72b534 | 2009-02-18 17:42:42 -0800 | [diff] [blame] | 2549 | 				if (phyid == TG3_PHY_OUI_1 || | 
 | 2550 | 				    phyid == TG3_PHY_OUI_2 || | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2551 | 				    phyid == TG3_PHY_OUI_3) | 
 | 2552 | 					do_low_power = true; | 
 | 2553 | 			} | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 2554 | 		} | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 2555 | 	} else { | 
| Matt Carlson | 2023276 | 2008-12-21 20:18:56 -0800 | [diff] [blame] | 2556 | 		do_low_power = true; | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2557 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 2558 | 		if (tp->link_config.phy_is_low_power == 0) { | 
 | 2559 | 			tp->link_config.phy_is_low_power = 1; | 
 | 2560 | 			tp->link_config.orig_speed = tp->link_config.speed; | 
 | 2561 | 			tp->link_config.orig_duplex = tp->link_config.duplex; | 
 | 2562 | 			tp->link_config.orig_autoneg = tp->link_config.autoneg; | 
 | 2563 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2564 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 2565 | 		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { | 
 | 2566 | 			tp->link_config.speed = SPEED_10; | 
 | 2567 | 			tp->link_config.duplex = DUPLEX_HALF; | 
 | 2568 | 			tp->link_config.autoneg = AUTONEG_ENABLE; | 
 | 2569 | 			tg3_setup_phy(tp, 0); | 
 | 2570 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2571 | 	} | 
 | 2572 |  | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 2573 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 2574 | 		u32 val; | 
 | 2575 |  | 
 | 2576 | 		val = tr32(GRC_VCPU_EXT_CTRL); | 
 | 2577 | 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); | 
 | 2578 | 	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 2579 | 		int i; | 
 | 2580 | 		u32 val; | 
 | 2581 |  | 
 | 2582 | 		for (i = 0; i < 200; i++) { | 
 | 2583 | 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); | 
 | 2584 | 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) | 
 | 2585 | 				break; | 
 | 2586 | 			msleep(1); | 
 | 2587 | 		} | 
 | 2588 | 	} | 
| Gary Zambrano | a85feb8 | 2007-05-05 11:52:19 -0700 | [diff] [blame] | 2589 | 	if (tp->tg3_flags & TG3_FLAG_WOL_CAP) | 
 | 2590 | 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | | 
 | 2591 | 						     WOL_DRV_STATE_SHUTDOWN | | 
 | 2592 | 						     WOL_DRV_WOL | | 
 | 2593 | 						     WOL_SET_MAGIC_PKT); | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 2594 |  | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 2595 | 	if (device_should_wake) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2596 | 		u32 mac_mode; | 
 | 2597 |  | 
 | 2598 | 		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2599 | 			if (do_low_power) { | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 2600 | 				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); | 
 | 2601 | 				udelay(40); | 
 | 2602 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2603 |  | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 2604 | 			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 
 | 2605 | 				mac_mode = MAC_MODE_PORT_MODE_GMII; | 
 | 2606 | 			else | 
 | 2607 | 				mac_mode = MAC_MODE_PORT_MODE_MII; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2608 |  | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 2609 | 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; | 
 | 2610 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 
 | 2611 | 			    ASIC_REV_5700) { | 
 | 2612 | 				u32 speed = (tp->tg3_flags & | 
 | 2613 | 					     TG3_FLAG_WOL_SPEED_100MB) ? | 
 | 2614 | 					     SPEED_100 : SPEED_10; | 
 | 2615 | 				if (tg3_5700_link_polarity(tp, speed)) | 
 | 2616 | 					mac_mode |= MAC_MODE_LINK_POLARITY; | 
 | 2617 | 				else | 
 | 2618 | 					mac_mode &= ~MAC_MODE_LINK_POLARITY; | 
 | 2619 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2620 | 		} else { | 
 | 2621 | 			mac_mode = MAC_MODE_PORT_MODE_TBI; | 
 | 2622 | 		} | 
 | 2623 |  | 
| John W. Linville | cbf4685 | 2005-04-21 17:01:29 -0700 | [diff] [blame] | 2624 | 		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2625 | 			tw32(MAC_LED_CTRL, tp->led_ctrl); | 
 | 2626 |  | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 2627 | 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; | 
 | 2628 | 		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 
 | 2629 | 		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) && | 
 | 2630 | 		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 
 | 2631 | 		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) | 
 | 2632 | 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2633 |  | 
| Matt Carlson | 3bda125 | 2008-08-15 14:08:22 -0700 | [diff] [blame] | 2634 | 		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 
 | 2635 | 			mac_mode |= tp->mac_mode & | 
 | 2636 | 				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); | 
 | 2637 | 			if (mac_mode & MAC_MODE_APE_TX_EN) | 
 | 2638 | 				mac_mode |= MAC_MODE_TDE_ENABLE; | 
 | 2639 | 		} | 
 | 2640 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2641 | 		tw32_f(MAC_MODE, mac_mode); | 
 | 2642 | 		udelay(100); | 
 | 2643 |  | 
 | 2644 | 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); | 
 | 2645 | 		udelay(10); | 
 | 2646 | 	} | 
 | 2647 |  | 
 | 2648 | 	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) && | 
 | 2649 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 2650 | 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 
 | 2651 | 		u32 base_val; | 
 | 2652 |  | 
 | 2653 | 		base_val = tp->pci_clock_ctrl; | 
 | 2654 | 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE | | 
 | 2655 | 			     CLOCK_CTRL_TXCLK_DISABLE); | 
 | 2656 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2657 | 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | | 
 | 2658 | 			    CLOCK_CTRL_PWRDOWN_PLL133, 40); | 
| Michael Chan | d7b0a85 | 2007-02-13 12:17:38 -0800 | [diff] [blame] | 2659 | 	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 
| Matt Carlson | 795d01c | 2007-10-07 23:28:17 -0700 | [diff] [blame] | 2660 | 		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || | 
| Michael Chan | d7b0a85 | 2007-02-13 12:17:38 -0800 | [diff] [blame] | 2661 | 		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 2662 | 		/* do nothing */ | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 2663 | 	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2664 | 		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { | 
 | 2665 | 		u32 newbits1, newbits2; | 
 | 2666 |  | 
 | 2667 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 2668 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 
 | 2669 | 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | | 
 | 2670 | 				    CLOCK_CTRL_TXCLK_DISABLE | | 
 | 2671 | 				    CLOCK_CTRL_ALTCLK); | 
 | 2672 | 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; | 
 | 2673 | 		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 
 | 2674 | 			newbits1 = CLOCK_CTRL_625_CORE; | 
 | 2675 | 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; | 
 | 2676 | 		} else { | 
 | 2677 | 			newbits1 = CLOCK_CTRL_ALTCLK; | 
 | 2678 | 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; | 
 | 2679 | 		} | 
 | 2680 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2681 | 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, | 
 | 2682 | 			    40); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2683 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2684 | 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, | 
 | 2685 | 			    40); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2686 |  | 
 | 2687 | 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 2688 | 			u32 newbits3; | 
 | 2689 |  | 
 | 2690 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 2691 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 
 | 2692 | 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | | 
 | 2693 | 					    CLOCK_CTRL_TXCLK_DISABLE | | 
 | 2694 | 					    CLOCK_CTRL_44MHZ_CORE); | 
 | 2695 | 			} else { | 
 | 2696 | 				newbits3 = CLOCK_CTRL_44MHZ_CORE; | 
 | 2697 | 			} | 
 | 2698 |  | 
| Michael Chan | b401e9e | 2005-12-19 16:27:04 -0800 | [diff] [blame] | 2699 | 			tw32_wait_f(TG3PCI_CLOCK_CTRL, | 
 | 2700 | 				    tp->pci_clock_ctrl | newbits3, 40); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2701 | 		} | 
 | 2702 | 	} | 
 | 2703 |  | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 2704 | 	if (!(device_should_wake) && | 
| Matt Carlson | 2243584 | 2008-11-21 17:21:13 -0800 | [diff] [blame] | 2705 | 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 
| Matt Carlson | 0a459aa | 2008-11-03 16:54:15 -0800 | [diff] [blame] | 2706 | 		tg3_power_down_phy(tp, do_low_power); | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 2707 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2708 | 	tg3_frob_aux_power(tp); | 
 | 2709 |  | 
 | 2710 | 	/* Workaround for unstable PLL clock */ | 
 | 2711 | 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || | 
 | 2712 | 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { | 
 | 2713 | 		u32 val = tr32(0x7d00); | 
 | 2714 |  | 
 | 2715 | 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); | 
 | 2716 | 		tw32(0x7d00, val); | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 2717 | 		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 2718 | 			int err; | 
 | 2719 |  | 
 | 2720 | 			err = tg3_nvram_lock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2721 | 			tg3_halt_cpu(tp, RX_CPU_BASE); | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 2722 | 			if (!err) | 
 | 2723 | 				tg3_nvram_unlock(tp); | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 2724 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2725 | 	} | 
 | 2726 |  | 
| Michael Chan | bbadf50 | 2006-04-06 21:46:34 -0700 | [diff] [blame] | 2727 | 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); | 
 | 2728 |  | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 2729 | 	if (device_should_wake) | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 2730 | 		pci_enable_wake(tp->pdev, state, true); | 
 | 2731 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2732 | 	/* Finally, set the new power state. */ | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 2733 | 	pci_set_power_state(tp->pdev, state); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2734 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2735 | 	return 0; | 
 | 2736 | } | 
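/* tg3_set_power_state() above is the one place that sequences a transition
 * into D1/D2/D3hot: it restores CLKREQ where the CLKREQ bug workaround
 * applies, masks PCI interrupts, saves the current link settings and
 * renegotiates (or forces) a 10/100 link suitable for WOL, writes the WOL
 * signature into NIC SRAM, programs MAC_MODE for magic-packet reception,
 * gates the RX/TX clocks or powers down the PLL as the chip allows, powers
 * the PHY down unless ASF or wake-up still needs it, frobs auxiliary power,
 * posts the shutdown signature and finally enters the requested PCI power
 * state.  D0 is handled early by simply switching back out of Vaux.
 */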
 | 2737 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2738 | static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) | 
 | 2739 | { | 
 | 2740 | 	switch (val & MII_TG3_AUX_STAT_SPDMASK) { | 
 | 2741 | 	case MII_TG3_AUX_STAT_10HALF: | 
 | 2742 | 		*speed = SPEED_10; | 
 | 2743 | 		*duplex = DUPLEX_HALF; | 
 | 2744 | 		break; | 
 | 2745 |  | 
 | 2746 | 	case MII_TG3_AUX_STAT_10FULL: | 
 | 2747 | 		*speed = SPEED_10; | 
 | 2748 | 		*duplex = DUPLEX_FULL; | 
 | 2749 | 		break; | 
 | 2750 |  | 
 | 2751 | 	case MII_TG3_AUX_STAT_100HALF: | 
 | 2752 | 		*speed = SPEED_100; | 
 | 2753 | 		*duplex = DUPLEX_HALF; | 
 | 2754 | 		break; | 
 | 2755 |  | 
 | 2756 | 	case MII_TG3_AUX_STAT_100FULL: | 
 | 2757 | 		*speed = SPEED_100; | 
 | 2758 | 		*duplex = DUPLEX_FULL; | 
 | 2759 | 		break; | 
 | 2760 |  | 
 | 2761 | 	case MII_TG3_AUX_STAT_1000HALF: | 
 | 2762 | 		*speed = SPEED_1000; | 
 | 2763 | 		*duplex = DUPLEX_HALF; | 
 | 2764 | 		break; | 
 | 2765 |  | 
 | 2766 | 	case MII_TG3_AUX_STAT_1000FULL: | 
 | 2767 | 		*speed = SPEED_1000; | 
 | 2768 | 		*duplex = DUPLEX_FULL; | 
 | 2769 | 		break; | 
 | 2770 |  | 
 | 2771 | 	default: | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 2772 | 		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 
| Michael Chan | 715116a | 2006-09-27 16:09:25 -0700 | [diff] [blame] | 2773 | 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : | 
 | 2774 | 				 SPEED_10; | 
 | 2775 | 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : | 
 | 2776 | 				  DUPLEX_HALF; | 
 | 2777 | 			break; | 
 | 2778 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2779 | 		*speed = SPEED_INVALID; | 
 | 2780 | 		*duplex = DUPLEX_INVALID; | 
 | 2781 | 		break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 2782 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2783 | } | 
 | 2784 |  | 
 | 2785 | static void tg3_phy_copper_begin(struct tg3 *tp) | 
 | 2786 | { | 
 | 2787 | 	u32 new_adv; | 
 | 2788 | 	int i; | 
 | 2789 |  | 
 | 2790 | 	if (tp->link_config.phy_is_low_power) { | 
 | 2791 | 		/* Entering low power mode.  Disable gigabit and | 
 | 2792 | 		 * 100baseT advertisements. | 
 | 2793 | 		 */ | 
 | 2794 | 		tg3_writephy(tp, MII_TG3_CTRL, 0); | 
 | 2795 |  | 
 | 2796 | 		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL | | 
 | 2797 | 			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | 
 | 2798 | 		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) | 
 | 2799 | 			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL); | 
 | 2800 |  | 
 | 2801 | 		tg3_writephy(tp, MII_ADVERTISE, new_adv); | 
 | 2802 | 	} else if (tp->link_config.speed == SPEED_INVALID) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2803 | 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) | 
 | 2804 | 			tp->link_config.advertising &= | 
 | 2805 | 				~(ADVERTISED_1000baseT_Half | | 
 | 2806 | 				  ADVERTISED_1000baseT_Full); | 
 | 2807 |  | 
| Matt Carlson | ba4d07a | 2007-12-20 20:08:00 -0800 | [diff] [blame] | 2808 | 		new_adv = ADVERTISE_CSMA; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2809 | 		if (tp->link_config.advertising & ADVERTISED_10baseT_Half) | 
 | 2810 | 			new_adv |= ADVERTISE_10HALF; | 
 | 2811 | 		if (tp->link_config.advertising & ADVERTISED_10baseT_Full) | 
 | 2812 | 			new_adv |= ADVERTISE_10FULL; | 
 | 2813 | 		if (tp->link_config.advertising & ADVERTISED_100baseT_Half) | 
 | 2814 | 			new_adv |= ADVERTISE_100HALF; | 
 | 2815 | 		if (tp->link_config.advertising & ADVERTISED_100baseT_Full) | 
 | 2816 | 			new_adv |= ADVERTISE_100FULL; | 
| Matt Carlson | ba4d07a | 2007-12-20 20:08:00 -0800 | [diff] [blame] | 2817 |  | 
 | 2818 | 		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); | 
 | 2819 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2820 | 		tg3_writephy(tp, MII_ADVERTISE, new_adv); | 
 | 2821 |  | 
 | 2822 | 		if (tp->link_config.advertising & | 
 | 2823 | 		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { | 
 | 2824 | 			new_adv = 0; | 
 | 2825 | 			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) | 
 | 2826 | 				new_adv |= MII_TG3_CTRL_ADV_1000_HALF; | 
 | 2827 | 			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) | 
 | 2828 | 				new_adv |= MII_TG3_CTRL_ADV_1000_FULL; | 
 | 2829 | 			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) && | 
 | 2830 | 			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 
 | 2831 | 			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) | 
 | 2832 | 				new_adv |= (MII_TG3_CTRL_AS_MASTER | | 
 | 2833 | 					    MII_TG3_CTRL_ENABLE_AS_MASTER); | 
 | 2834 | 			tg3_writephy(tp, MII_TG3_CTRL, new_adv); | 
 | 2835 | 		} else { | 
 | 2836 | 			tg3_writephy(tp, MII_TG3_CTRL, 0); | 
 | 2837 | 		} | 
 | 2838 | 	} else { | 
| Matt Carlson | ba4d07a | 2007-12-20 20:08:00 -0800 | [diff] [blame] | 2839 | 		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); | 
 | 2840 | 		new_adv |= ADVERTISE_CSMA; | 
 | 2841 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2842 | 		/* Asking for a specific link mode. */ | 
 | 2843 | 		if (tp->link_config.speed == SPEED_1000) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | 			tg3_writephy(tp, MII_ADVERTISE, new_adv); | 
 | 2845 |  | 
 | 2846 | 			if (tp->link_config.duplex == DUPLEX_FULL) | 
 | 2847 | 				new_adv = MII_TG3_CTRL_ADV_1000_FULL; | 
 | 2848 | 			else | 
 | 2849 | 				new_adv = MII_TG3_CTRL_ADV_1000_HALF; | 
 | 2850 | 			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 
 | 2851 | 			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | 
 | 2852 | 				new_adv |= (MII_TG3_CTRL_AS_MASTER | | 
 | 2853 | 					    MII_TG3_CTRL_ENABLE_AS_MASTER); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2854 | 		} else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2855 | 			if (tp->link_config.speed == SPEED_100) { | 
 | 2856 | 				if (tp->link_config.duplex == DUPLEX_FULL) | 
 | 2857 | 					new_adv |= ADVERTISE_100FULL; | 
 | 2858 | 				else | 
 | 2859 | 					new_adv |= ADVERTISE_100HALF; | 
 | 2860 | 			} else { | 
 | 2861 | 				if (tp->link_config.duplex == DUPLEX_FULL) | 
 | 2862 | 					new_adv |= ADVERTISE_10FULL; | 
 | 2863 | 				else | 
 | 2864 | 					new_adv |= ADVERTISE_10HALF; | 
 | 2865 | 			} | 
 | 2866 | 			tg3_writephy(tp, MII_ADVERTISE, new_adv); | 
| Matt Carlson | ba4d07a | 2007-12-20 20:08:00 -0800 | [diff] [blame] | 2867 |  | 
 | 2868 | 			new_adv = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2869 | 		} | 
| Matt Carlson | ba4d07a | 2007-12-20 20:08:00 -0800 | [diff] [blame] | 2870 |  | 
 | 2871 | 		tg3_writephy(tp, MII_TG3_CTRL, new_adv); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2872 | 	} | 
 | 2873 |  | 
 | 2874 | 	if (tp->link_config.autoneg == AUTONEG_DISABLE && | 
 | 2875 | 	    tp->link_config.speed != SPEED_INVALID) { | 
 | 2876 | 		u32 bmcr, orig_bmcr; | 
 | 2877 |  | 
 | 2878 | 		tp->link_config.active_speed = tp->link_config.speed; | 
 | 2879 | 		tp->link_config.active_duplex = tp->link_config.duplex; | 
 | 2880 |  | 
 | 2881 | 		bmcr = 0; | 
 | 2882 | 		switch (tp->link_config.speed) { | 
 | 2883 | 		default: | 
 | 2884 | 		case SPEED_10: | 
 | 2885 | 			break; | 
 | 2886 |  | 
 | 2887 | 		case SPEED_100: | 
 | 2888 | 			bmcr |= BMCR_SPEED100; | 
 | 2889 | 			break; | 
 | 2890 |  | 
 | 2891 | 		case SPEED_1000: | 
 | 2892 | 			bmcr |= TG3_BMCR_SPEED1000; | 
 | 2893 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 2894 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2895 |  | 
 | 2896 | 		if (tp->link_config.duplex == DUPLEX_FULL) | 
 | 2897 | 			bmcr |= BMCR_FULLDPLX; | 
 | 2898 |  | 
 | 2899 | 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && | 
 | 2900 | 		    (bmcr != orig_bmcr)) { | 
 | 2901 | 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); | 
 | 2902 | 			for (i = 0; i < 1500; i++) { | 
 | 2903 | 				u32 tmp; | 
 | 2904 |  | 
 | 2905 | 				udelay(10); | 
 | 2906 | 				if (tg3_readphy(tp, MII_BMSR, &tmp) || | 
 | 2907 | 				    tg3_readphy(tp, MII_BMSR, &tmp)) | 
 | 2908 | 					continue; | 
 | 2909 | 				if (!(tmp & BMSR_LSTATUS)) { | 
 | 2910 | 					udelay(40); | 
 | 2911 | 					break; | 
 | 2912 | 				} | 
 | 2913 | 			} | 
 | 2914 | 			tg3_writephy(tp, MII_BMCR, bmcr); | 
 | 2915 | 			udelay(40); | 
 | 2916 | 		} | 
 | 2917 | 	} else { | 
 | 2918 | 		tg3_writephy(tp, MII_BMCR, | 
 | 2919 | 			     BMCR_ANENABLE | BMCR_ANRESTART); | 
 | 2920 | 	} | 
 | 2921 | } | 
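/* tg3_phy_copper_begin() above programs the copper advertisement in one of
 * three modes: a restricted 10 (optionally 100) Mb/s advertisement when the
 * PHY is being put into low power, the full link_config.advertising set plus
 * flow-control bits for ordinary autonegotiation, or a single forced mode.
 * In the forced case the PHY is briefly put into loopback and polled until
 * the link drops before the new BMCR value is written.
 */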
 | 2922 |  | 
 | 2923 | static int tg3_init_5401phy_dsp(struct tg3 *tp) | 
 | 2924 | { | 
 | 2925 | 	int err; | 
 | 2926 |  | 
 | 2927 | 	/* Turn off tap power management. */ | 
 | 2928 | 	/* Set Extended packet length bit */ | 
 | 2929 | 	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); | 
 | 2930 |  | 
 | 2931 | 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012); | 
 | 2932 | 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804); | 
 | 2933 |  | 
 | 2934 | 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013); | 
 | 2935 | 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204); | 
 | 2936 |  | 
 | 2937 | 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); | 
 | 2938 | 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132); | 
 | 2939 |  | 
 | 2940 | 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006); | 
 | 2941 | 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232); | 
 | 2942 |  | 
 | 2943 | 	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f); | 
 | 2944 | 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20); | 
 | 2945 |  | 
 | 2946 | 	udelay(40); | 
 | 2947 |  | 
 | 2948 | 	return err; | 
 | 2949 | } | 
 | 2950 |  | 
| Michael Chan | 3600d91 | 2006-12-07 00:21:48 -0800 | [diff] [blame] | 2951 | static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2952 | { | 
| Michael Chan | 3600d91 | 2006-12-07 00:21:48 -0800 | [diff] [blame] | 2953 | 	u32 adv_reg, all_mask = 0; | 
 | 2954 |  | 
 | 2955 | 	if (mask & ADVERTISED_10baseT_Half) | 
 | 2956 | 		all_mask |= ADVERTISE_10HALF; | 
 | 2957 | 	if (mask & ADVERTISED_10baseT_Full) | 
 | 2958 | 		all_mask |= ADVERTISE_10FULL; | 
 | 2959 | 	if (mask & ADVERTISED_100baseT_Half) | 
 | 2960 | 		all_mask |= ADVERTISE_100HALF; | 
 | 2961 | 	if (mask & ADVERTISED_100baseT_Full) | 
 | 2962 | 		all_mask |= ADVERTISE_100FULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2963 |  | 
 | 2964 | 	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) | 
 | 2965 | 		return 0; | 
 | 2966 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2967 | 	if ((adv_reg & all_mask) != all_mask) | 
 | 2968 | 		return 0; | 
 | 2969 | 	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { | 
 | 2970 | 		u32 tg3_ctrl; | 
 | 2971 |  | 
| Michael Chan | 3600d91 | 2006-12-07 00:21:48 -0800 | [diff] [blame] | 2972 | 		all_mask = 0; | 
 | 2973 | 		if (mask & ADVERTISED_1000baseT_Half) | 
 | 2974 | 			all_mask |= ADVERTISE_1000HALF; | 
 | 2975 | 		if (mask & ADVERTISED_1000baseT_Full) | 
 | 2976 | 			all_mask |= ADVERTISE_1000FULL; | 
 | 2977 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2978 | 		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl)) | 
 | 2979 | 			return 0; | 
 | 2980 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2981 | 		if ((tg3_ctrl & all_mask) != all_mask) | 
 | 2982 | 			return 0; | 
 | 2983 | 	} | 
 | 2984 | 	return 1; | 
 | 2985 | } | 
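/* tg3_copper_is_advertising_all() above returns 1 only when every mode
 * requested in @mask is already present in MII_ADVERTISE (and, unless the
 * device is 10/100-only, in MII_TG3_CTRL for the gigabit modes).  The
 * link-up path below uses it to decide whether an existing autoneg result
 * can be kept or a fresh negotiation is required.
 */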
 | 2986 |  | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 2987 | static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) | 
 | 2988 | { | 
 | 2989 | 	u32 curadv, reqadv; | 
 | 2990 |  | 
 | 2991 | 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) | 
 | 2992 | 		return 1; | 
 | 2993 |  | 
 | 2994 | 	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | 
 | 2995 | 	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); | 
 | 2996 |  | 
 | 2997 | 	if (tp->link_config.active_duplex == DUPLEX_FULL) { | 
 | 2998 | 		if (curadv != reqadv) | 
 | 2999 | 			return 0; | 
 | 3000 |  | 
 | 3001 | 		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) | 
 | 3002 | 			tg3_readphy(tp, MII_LPA, rmtadv); | 
 | 3003 | 	} else { | 
 | 3004 | 		/* Reprogram the advertisement register, even if it | 
 | 3005 | 		 * does not affect the current link.  If the link | 
 | 3006 | 		 * gets renegotiated in the future, we can save an | 
 | 3007 | 		 * additional renegotiation cycle by advertising | 
 | 3008 | 		 * it correctly in the first place. | 
 | 3009 | 		 */ | 
 | 3010 | 		if (curadv != reqadv) { | 
 | 3011 | 			*lcladv &= ~(ADVERTISE_PAUSE_CAP | | 
 | 3012 | 				     ADVERTISE_PAUSE_ASYM); | 
 | 3013 | 			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); | 
 | 3014 | 		} | 
 | 3015 | 	} | 
 | 3016 |  | 
 | 3017 | 	return 1; | 
 | 3018 | } | 
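/* tg3_adv_1000T_flowctrl_ok() above checks that the pause bits currently in
 * MII_ADVERTISE match what link_config.flowctrl requests.  On a full-duplex
 * link a mismatch fails the check (returns 0); otherwise the advertisement
 * is quietly rewritten for the next negotiation and the check still passes.
 * On success *lcladv (and, for full duplex with autonegotiated pause,
 * *rmtadv) is filled in for the later tg3_setup_flow_control() call.
 */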
 | 3019 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3020 | static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) | 
 | 3021 | { | 
 | 3022 | 	int current_link_up; | 
 | 3023 | 	u32 bmsr, dummy; | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 3024 | 	u32 lcl_adv, rmt_adv; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3025 | 	u16 current_speed; | 
 | 3026 | 	u8 current_duplex; | 
 | 3027 | 	int i, err; | 
 | 3028 |  | 
 | 3029 | 	tw32(MAC_EVENT, 0); | 
 | 3030 |  | 
 | 3031 | 	tw32_f(MAC_STATUS, | 
 | 3032 | 	     (MAC_STATUS_SYNC_CHANGED | | 
 | 3033 | 	      MAC_STATUS_CFG_CHANGED | | 
 | 3034 | 	      MAC_STATUS_MI_COMPLETION | | 
 | 3035 | 	      MAC_STATUS_LNKSTATE_CHANGED)); | 
 | 3036 | 	udelay(40); | 
 | 3037 |  | 
| Matt Carlson | 8ef2142 | 2008-05-02 16:47:53 -0700 | [diff] [blame] | 3038 | 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { | 
 | 3039 | 		tw32_f(MAC_MI_MODE, | 
 | 3040 | 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); | 
 | 3041 | 		udelay(80); | 
 | 3042 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3043 |  | 
 | 3044 | 	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02); | 
 | 3045 |  | 
 | 3046 | 	/* Some third-party PHYs need to be reset on link going | 
 | 3047 | 	 * down. | 
 | 3048 | 	 */ | 
 | 3049 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 
 | 3050 | 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 
 | 3051 | 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && | 
 | 3052 | 	    netif_carrier_ok(tp->dev)) { | 
 | 3053 | 		tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 3054 | 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 
 | 3055 | 		    !(bmsr & BMSR_LSTATUS)) | 
 | 3056 | 			force_reset = 1; | 
 | 3057 | 	} | 
 | 3058 | 	if (force_reset) | 
 | 3059 | 		tg3_phy_reset(tp); | 
 | 3060 |  | 
 | 3061 | 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 
 | 3062 | 		tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 3063 | 		if (tg3_readphy(tp, MII_BMSR, &bmsr) || | 
 | 3064 | 		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) | 
 | 3065 | 			bmsr = 0; | 
 | 3066 |  | 
 | 3067 | 		if (!(bmsr & BMSR_LSTATUS)) { | 
 | 3068 | 			err = tg3_init_5401phy_dsp(tp); | 
 | 3069 | 			if (err) | 
 | 3070 | 				return err; | 
 | 3071 |  | 
 | 3072 | 			tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 3073 | 			for (i = 0; i < 1000; i++) { | 
 | 3074 | 				udelay(10); | 
 | 3075 | 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 
 | 3076 | 				    (bmsr & BMSR_LSTATUS)) { | 
 | 3077 | 					udelay(40); | 
 | 3078 | 					break; | 
 | 3079 | 				} | 
 | 3080 | 			} | 
 | 3081 |  | 
 | 3082 | 			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 && | 
 | 3083 | 			    !(bmsr & BMSR_LSTATUS) && | 
 | 3084 | 			    tp->link_config.active_speed == SPEED_1000) { | 
 | 3085 | 				err = tg3_phy_reset(tp); | 
 | 3086 | 				if (!err) | 
 | 3087 | 					err = tg3_init_5401phy_dsp(tp); | 
 | 3088 | 				if (err) | 
 | 3089 | 					return err; | 
 | 3090 | 			} | 
 | 3091 | 		} | 
 | 3092 | 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 
 | 3093 | 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { | 
 | 3094 | 		/* 5701 {A0,B0} CRC bug workaround */ | 
 | 3095 | 		tg3_writephy(tp, 0x15, 0x0a75); | 
 | 3096 | 		tg3_writephy(tp, 0x1c, 0x8c68); | 
 | 3097 | 		tg3_writephy(tp, 0x1c, 0x8d68); | 
 | 3098 | 		tg3_writephy(tp, 0x1c, 0x8c68); | 
 | 3099 | 	} | 
 | 3100 |  | 
 | 3101 | 	/* Clear pending interrupts... */ | 
 | 3102 | 	tg3_readphy(tp, MII_TG3_ISTAT, &dummy); | 
 | 3103 | 	tg3_readphy(tp, MII_TG3_ISTAT, &dummy); | 
 | 3104 |  | 
 | 3105 | 	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) | 
 | 3106 | 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 3107 | 	else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | 		tg3_writephy(tp, MII_TG3_IMASK, ~0); | 
 | 3109 |  | 
 | 3110 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 3111 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 
 | 3112 | 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) | 
 | 3113 | 			tg3_writephy(tp, MII_TG3_EXT_CTRL, | 
 | 3114 | 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE); | 
 | 3115 | 		else | 
 | 3116 | 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); | 
 | 3117 | 	} | 
 | 3118 |  | 
 | 3119 | 	current_link_up = 0; | 
 | 3120 | 	current_speed = SPEED_INVALID; | 
 | 3121 | 	current_duplex = DUPLEX_INVALID; | 
 | 3122 |  | 
 | 3123 | 	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) { | 
 | 3124 | 		u32 val; | 
 | 3125 |  | 
 | 3126 | 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); | 
 | 3127 | 		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); | 
 | 3128 | 		if (!(val & (1 << 10))) { | 
 | 3129 | 			val |= (1 << 10); | 
 | 3130 | 			tg3_writephy(tp, MII_TG3_AUX_CTRL, val); | 
 | 3131 | 			goto relink; | 
 | 3132 | 		} | 
 | 3133 | 	} | 
 | 3134 |  | 
 | 3135 | 	bmsr = 0; | 
 | 3136 | 	for (i = 0; i < 100; i++) { | 
 | 3137 | 		tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 3138 | 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 
 | 3139 | 		    (bmsr & BMSR_LSTATUS)) | 
 | 3140 | 			break; | 
 | 3141 | 		udelay(40); | 
 | 3142 | 	} | 
 | 3143 |  | 
 | 3144 | 	if (bmsr & BMSR_LSTATUS) { | 
 | 3145 | 		u32 aux_stat, bmcr; | 
 | 3146 |  | 
 | 3147 | 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); | 
 | 3148 | 		for (i = 0; i < 2000; i++) { | 
 | 3149 | 			udelay(10); | 
 | 3150 | 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && | 
 | 3151 | 			    aux_stat) | 
 | 3152 | 				break; | 
 | 3153 | 		} | 
 | 3154 |  | 
 | 3155 | 		tg3_aux_stat_to_speed_duplex(tp, aux_stat, | 
 | 3156 | 					     &current_speed, | 
 | 3157 | 					     &current_duplex); | 
 | 3158 |  | 
 | 3159 | 		bmcr = 0; | 
 | 3160 | 		for (i = 0; i < 200; i++) { | 
 | 3161 | 			tg3_readphy(tp, MII_BMCR, &bmcr); | 
 | 3162 | 			if (tg3_readphy(tp, MII_BMCR, &bmcr)) | 
 | 3163 | 				continue; | 
 | 3164 | 			if (bmcr && bmcr != 0x7fff) | 
 | 3165 | 				break; | 
 | 3166 | 			udelay(10); | 
 | 3167 | 		} | 
 | 3168 |  | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 3169 | 		lcl_adv = 0; | 
 | 3170 | 		rmt_adv = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3171 |  | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 3172 | 		tp->link_config.active_speed = current_speed; | 
 | 3173 | 		tp->link_config.active_duplex = current_duplex; | 
 | 3174 |  | 
 | 3175 | 		if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 
 | 3176 | 			if ((bmcr & BMCR_ANENABLE) && | 
 | 3177 | 			    tg3_copper_is_advertising_all(tp, | 
 | 3178 | 						tp->link_config.advertising)) { | 
 | 3179 | 				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, | 
 | 3180 | 								  &rmt_adv)) | 
 | 3181 | 					current_link_up = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3182 | 			} | 
 | 3183 | 		} else { | 
 | 3184 | 			if (!(bmcr & BMCR_ANENABLE) && | 
 | 3185 | 			    tp->link_config.speed == current_speed && | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 3186 | 			    tp->link_config.duplex == current_duplex && | 
 | 3187 | 			    tp->link_config.flowctrl == | 
 | 3188 | 			    tp->link_config.active_flowctrl) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3189 | 				current_link_up = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3190 | 			} | 
 | 3191 | 		} | 
 | 3192 |  | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 3193 | 		if (current_link_up == 1 && | 
 | 3194 | 		    tp->link_config.active_duplex == DUPLEX_FULL) | 
 | 3195 | 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3196 | 	} | 
 | 3197 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3198 | relink: | 
| Michael Chan | 6921d20 | 2005-12-13 21:15:53 -0800 | [diff] [blame] | 3199 | 	if (current_link_up == 0 || tp->link_config.phy_is_low_power) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3200 | 		u32 tmp; | 
 | 3201 |  | 
 | 3202 | 		tg3_phy_copper_begin(tp); | 
 | 3203 |  | 
 | 3204 | 		tg3_readphy(tp, MII_BMSR, &tmp); | 
 | 3205 | 		if (!tg3_readphy(tp, MII_BMSR, &tmp) && | 
 | 3206 | 		    (tmp & BMSR_LSTATUS)) | 
 | 3207 | 			current_link_up = 1; | 
 | 3208 | 	} | 
 | 3209 |  | 
 | 3210 | 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; | 
 | 3211 | 	if (current_link_up == 1) { | 
 | 3212 | 		if (tp->link_config.active_speed == SPEED_100 || | 
 | 3213 | 		    tp->link_config.active_speed == SPEED_10) | 
 | 3214 | 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII; | 
 | 3215 | 		else | 
 | 3216 | 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 3217 | 	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) | 
 | 3218 | 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII; | 
 | 3219 | 	else | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3220 | 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
 | 3221 |  | 
 | 3222 | 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; | 
 | 3223 | 	if (tp->link_config.active_duplex == DUPLEX_HALF) | 
 | 3224 | 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX; | 
 | 3225 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3226 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 3227 | 		if (current_link_up == 1 && | 
 | 3228 | 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3229 | 			tp->mac_mode |= MAC_MODE_LINK_POLARITY; | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 3230 | 		else | 
 | 3231 | 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3232 | 	} | 
 | 3233 |  | 
 | 3234 | 	/* ??? Without this setting Netgear GA302T PHY does not | 
 | 3235 | 	 * ??? send/receive packets... | 
 | 3236 | 	 */ | 
 | 3237 | 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 && | 
 | 3238 | 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { | 
 | 3239 | 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; | 
 | 3240 | 		tw32_f(MAC_MI_MODE, tp->mi_mode); | 
 | 3241 | 		udelay(80); | 
 | 3242 | 	} | 
 | 3243 |  | 
 | 3244 | 	tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3245 | 	udelay(40); | 
 | 3246 |  | 
 | 3247 | 	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { | 
 | 3248 | 		/* Polled via timer. */ | 
 | 3249 | 		tw32_f(MAC_EVENT, 0); | 
 | 3250 | 	} else { | 
 | 3251 | 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 
 | 3252 | 	} | 
 | 3253 | 	udelay(40); | 
 | 3254 |  | 
 | 3255 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && | 
 | 3256 | 	    current_link_up == 1 && | 
 | 3257 | 	    tp->link_config.active_speed == SPEED_1000 && | 
 | 3258 | 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) || | 
 | 3259 | 	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) { | 
 | 3260 | 		udelay(120); | 
 | 3261 | 		tw32_f(MAC_STATUS, | 
 | 3262 | 		     (MAC_STATUS_SYNC_CHANGED | | 
 | 3263 | 		      MAC_STATUS_CFG_CHANGED)); | 
 | 3264 | 		udelay(40); | 
 | 3265 | 		tg3_write_mem(tp, | 
 | 3266 | 			      NIC_SRAM_FIRMWARE_MBOX, | 
 | 3267 | 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2); | 
 | 3268 | 	} | 
 | 3269 |  | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 3270 | 	/* Prevent send BD corruption. */ | 
 | 3271 | 	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { | 
 | 3272 | 		u16 oldlnkctl, newlnkctl; | 
 | 3273 |  | 
 | 3274 | 		pci_read_config_word(tp->pdev, | 
 | 3275 | 				     tp->pcie_cap + PCI_EXP_LNKCTL, | 
 | 3276 | 				     &oldlnkctl); | 
 | 3277 | 		if (tp->link_config.active_speed == SPEED_100 || | 
 | 3278 | 		    tp->link_config.active_speed == SPEED_10) | 
 | 3279 | 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; | 
 | 3280 | 		else | 
 | 3281 | 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; | 
 | 3282 | 		if (newlnkctl != oldlnkctl) | 
 | 3283 | 			pci_write_config_word(tp->pdev, | 
 | 3284 | 					      tp->pcie_cap + PCI_EXP_LNKCTL, | 
 | 3285 | 					      newlnkctl); | 
 | 3286 | 	} | 
 | 3287 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3288 | 	if (current_link_up != netif_carrier_ok(tp->dev)) { | 
 | 3289 | 		if (current_link_up) | 
 | 3290 | 			netif_carrier_on(tp->dev); | 
 | 3291 | 		else | 
 | 3292 | 			netif_carrier_off(tp->dev); | 
 | 3293 | 		tg3_link_report(tp); | 
 | 3294 | 	} | 
 | 3295 |  | 
 | 3296 | 	return 0; | 
 | 3297 | } | 
 | 3298 |  | 
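/* Note on the back-to-back MII_BMSR reads used throughout the copper
 * link-setup path above: the BMSR link-status bit is latched-low per the
 * MII specification, so the first read returns the sticky value from any
 * earlier link drop and the second read reflects the current state.  A
 * minimal sketch of the pattern (tg3_readphy() returns 0 on success;
 * handle_link_up() is a hypothetical placeholder):
 *
 *	u32 bmsr = 0;
 *	tg3_readphy(tp, MII_BMSR, &bmsr);	(discards the latched value)
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		handle_link_up();
 */
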
 | 3299 | struct tg3_fiber_aneginfo { | 
 | 3300 | 	int state; | 
 | 3301 | #define ANEG_STATE_UNKNOWN		0 | 
 | 3302 | #define ANEG_STATE_AN_ENABLE		1 | 
 | 3303 | #define ANEG_STATE_RESTART_INIT		2 | 
 | 3304 | #define ANEG_STATE_RESTART		3 | 
 | 3305 | #define ANEG_STATE_DISABLE_LINK_OK	4 | 
 | 3306 | #define ANEG_STATE_ABILITY_DETECT_INIT	5 | 
 | 3307 | #define ANEG_STATE_ABILITY_DETECT	6 | 
 | 3308 | #define ANEG_STATE_ACK_DETECT_INIT	7 | 
 | 3309 | #define ANEG_STATE_ACK_DETECT		8 | 
 | 3310 | #define ANEG_STATE_COMPLETE_ACK_INIT	9 | 
 | 3311 | #define ANEG_STATE_COMPLETE_ACK		10 | 
 | 3312 | #define ANEG_STATE_IDLE_DETECT_INIT	11 | 
 | 3313 | #define ANEG_STATE_IDLE_DETECT		12 | 
 | 3314 | #define ANEG_STATE_LINK_OK		13 | 
 | 3315 | #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14 | 
 | 3316 | #define ANEG_STATE_NEXT_PAGE_WAIT	15 | 
 | 3317 |  | 
 | 3318 | 	u32 flags; | 
 | 3319 | #define MR_AN_ENABLE		0x00000001 | 
 | 3320 | #define MR_RESTART_AN		0x00000002 | 
 | 3321 | #define MR_AN_COMPLETE		0x00000004 | 
 | 3322 | #define MR_PAGE_RX		0x00000008 | 
 | 3323 | #define MR_NP_LOADED		0x00000010 | 
 | 3324 | #define MR_TOGGLE_TX		0x00000020 | 
 | 3325 | #define MR_LP_ADV_FULL_DUPLEX	0x00000040 | 
 | 3326 | #define MR_LP_ADV_HALF_DUPLEX	0x00000080 | 
 | 3327 | #define MR_LP_ADV_SYM_PAUSE	0x00000100 | 
 | 3328 | #define MR_LP_ADV_ASYM_PAUSE	0x00000200 | 
 | 3329 | #define MR_LP_ADV_REMOTE_FAULT1	0x00000400 | 
 | 3330 | #define MR_LP_ADV_REMOTE_FAULT2	0x00000800 | 
 | 3331 | #define MR_LP_ADV_NEXT_PAGE	0x00001000 | 
 | 3332 | #define MR_TOGGLE_RX		0x00002000 | 
 | 3333 | #define MR_NP_RX		0x00004000 | 
 | 3334 |  | 
 | 3335 | #define MR_LINK_OK		0x80000000 | 
 | 3336 |  | 
 | 3337 | 	unsigned long link_time, cur_time; | 
 | 3338 |  | 
 | 3339 | 	u32 ability_match_cfg; | 
 | 3340 | 	int ability_match_count; | 
 | 3341 |  | 
 | 3342 | 	char ability_match, idle_match, ack_match; | 
 | 3343 |  | 
 | 3344 | 	u32 txconfig, rxconfig; | 
 | 3345 | #define ANEG_CFG_NP		0x00000080 | 
 | 3346 | #define ANEG_CFG_ACK		0x00000040 | 
 | 3347 | #define ANEG_CFG_RF2		0x00000020 | 
 | 3348 | #define ANEG_CFG_RF1		0x00000010 | 
 | 3349 | #define ANEG_CFG_PS2		0x00000001 | 
 | 3350 | #define ANEG_CFG_PS1		0x00008000 | 
 | 3351 | #define ANEG_CFG_HD		0x00004000 | 
 | 3352 | #define ANEG_CFG_FD		0x00002000 | 
 | 3353 | #define ANEG_CFG_INVAL		0x00001f06 | 
 | 3354 |  | 
 | 3355 | }; | 
 | 3356 | #define ANEG_OK		0 | 
 | 3357 | #define ANEG_DONE	1 | 
 | 3358 | #define ANEG_TIMER_ENAB	2 | 
 | 3359 | #define ANEG_FAILED	-1 | 
 | 3360 |  | 
 | 3361 | #define ANEG_STATE_SETTLE_TIME	10000 | 
 | 3362 |  | 
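/* The state machine below is a software implementation of 1000BASE-X
 * (IEEE 802.3 clause 37) auto-negotiation.  The ANEG_CFG_* values above
 * mirror the ability bits of the /C/ configuration ordered sets as they
 * appear in the MAC's TX/RX auto-negotiation registers, and the MR_*
 * flags correspond to the clause 37 management variables of the same
 * names (mr_an_enable, mr_an_complete, mr_page_rx, ...).  Callers drive
 * the machine by invoking it repeatedly until it returns ANEG_DONE or
 * ANEG_FAILED, as fiber_autoneg() does further down.
 */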
 | 3363 | static int tg3_fiber_aneg_smachine(struct tg3 *tp, | 
 | 3364 | 				   struct tg3_fiber_aneginfo *ap) | 
 | 3365 | { | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3366 | 	u16 flowctrl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3367 | 	unsigned long delta; | 
 | 3368 | 	u32 rx_cfg_reg; | 
 | 3369 | 	int ret; | 
 | 3370 |  | 
 | 3371 | 	if (ap->state == ANEG_STATE_UNKNOWN) { | 
 | 3372 | 		ap->rxconfig = 0; | 
 | 3373 | 		ap->link_time = 0; | 
 | 3374 | 		ap->cur_time = 0; | 
 | 3375 | 		ap->ability_match_cfg = 0; | 
 | 3376 | 		ap->ability_match_count = 0; | 
 | 3377 | 		ap->ability_match = 0; | 
 | 3378 | 		ap->idle_match = 0; | 
 | 3379 | 		ap->ack_match = 0; | 
 | 3380 | 	} | 
 | 3381 | 	ap->cur_time++; | 
 | 3382 |  | 
 | 3383 | 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { | 
 | 3384 | 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); | 
 | 3385 |  | 
 | 3386 | 		if (rx_cfg_reg != ap->ability_match_cfg) { | 
 | 3387 | 			ap->ability_match_cfg = rx_cfg_reg; | 
 | 3388 | 			ap->ability_match = 0; | 
 | 3389 | 			ap->ability_match_count = 0; | 
 | 3390 | 		} else { | 
 | 3391 | 			if (++ap->ability_match_count > 1) { | 
 | 3392 | 				ap->ability_match = 1; | 
 | 3393 | 				ap->ability_match_cfg = rx_cfg_reg; | 
 | 3394 | 			} | 
 | 3395 | 		} | 
 | 3396 | 		if (rx_cfg_reg & ANEG_CFG_ACK) | 
 | 3397 | 			ap->ack_match = 1; | 
 | 3398 | 		else | 
 | 3399 | 			ap->ack_match = 0; | 
 | 3400 |  | 
 | 3401 | 		ap->idle_match = 0; | 
 | 3402 | 	} else { | 
 | 3403 | 		ap->idle_match = 1; | 
 | 3404 | 		ap->ability_match_cfg = 0; | 
 | 3405 | 		ap->ability_match_count = 0; | 
 | 3406 | 		ap->ability_match = 0; | 
 | 3407 | 		ap->ack_match = 0; | 
 | 3408 |  | 
 | 3409 | 		rx_cfg_reg = 0; | 
 | 3410 | 	} | 
 | 3411 |  | 
 | 3412 | 	ap->rxconfig = rx_cfg_reg; | 
 | 3413 | 	ret = ANEG_OK; | 
 | 3414 |  | 
 | 3415 | 	switch (ap->state) { | 
 | 3416 | 	case ANEG_STATE_UNKNOWN: | 
 | 3417 | 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) | 
 | 3418 | 			ap->state = ANEG_STATE_AN_ENABLE; | 
 | 3419 |  | 
 | 3420 | 		/* fallthru */ | 
 | 3421 | 	case ANEG_STATE_AN_ENABLE: | 
 | 3422 | 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); | 
 | 3423 | 		if (ap->flags & MR_AN_ENABLE) { | 
 | 3424 | 			ap->link_time = 0; | 
 | 3425 | 			ap->cur_time = 0; | 
 | 3426 | 			ap->ability_match_cfg = 0; | 
 | 3427 | 			ap->ability_match_count = 0; | 
 | 3428 | 			ap->ability_match = 0; | 
 | 3429 | 			ap->idle_match = 0; | 
 | 3430 | 			ap->ack_match = 0; | 
 | 3431 |  | 
 | 3432 | 			ap->state = ANEG_STATE_RESTART_INIT; | 
 | 3433 | 		} else { | 
 | 3434 | 			ap->state = ANEG_STATE_DISABLE_LINK_OK; | 
 | 3435 | 		} | 
 | 3436 | 		break; | 
 | 3437 |  | 
 | 3438 | 	case ANEG_STATE_RESTART_INIT: | 
 | 3439 | 		ap->link_time = ap->cur_time; | 
 | 3440 | 		ap->flags &= ~(MR_NP_LOADED); | 
 | 3441 | 		ap->txconfig = 0; | 
 | 3442 | 		tw32(MAC_TX_AUTO_NEG, 0); | 
 | 3443 | 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS; | 
 | 3444 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3445 | 		udelay(40); | 
 | 3446 |  | 
 | 3447 | 		ret = ANEG_TIMER_ENAB; | 
 | 3448 | 		ap->state = ANEG_STATE_RESTART; | 
 | 3449 |  | 
 | 3450 | 		/* fallthru */ | 
 | 3451 | 	case ANEG_STATE_RESTART: | 
 | 3452 | 		delta = ap->cur_time - ap->link_time; | 
 | 3453 | 		if (delta > ANEG_STATE_SETTLE_TIME) { | 
 | 3454 | 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT; | 
 | 3455 | 		} else { | 
 | 3456 | 			ret = ANEG_TIMER_ENAB; | 
 | 3457 | 		} | 
 | 3458 | 		break; | 
 | 3459 |  | 
 | 3460 | 	case ANEG_STATE_DISABLE_LINK_OK: | 
 | 3461 | 		ret = ANEG_DONE; | 
 | 3462 | 		break; | 
 | 3463 |  | 
 | 3464 | 	case ANEG_STATE_ABILITY_DETECT_INIT: | 
 | 3465 | 		ap->flags &= ~(MR_TOGGLE_TX); | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3466 | 		ap->txconfig = ANEG_CFG_FD; | 
 | 3467 | 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); | 
 | 3468 | 		if (flowctrl & ADVERTISE_1000XPAUSE) | 
 | 3469 | 			ap->txconfig |= ANEG_CFG_PS1; | 
 | 3470 | 		if (flowctrl & ADVERTISE_1000XPSE_ASYM) | 
 | 3471 | 			ap->txconfig |= ANEG_CFG_PS2; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3472 | 		tw32(MAC_TX_AUTO_NEG, ap->txconfig); | 
 | 3473 | 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS; | 
 | 3474 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3475 | 		udelay(40); | 
 | 3476 |  | 
 | 3477 | 		ap->state = ANEG_STATE_ABILITY_DETECT; | 
 | 3478 | 		break; | 
 | 3479 |  | 
 | 3480 | 	case ANEG_STATE_ABILITY_DETECT: | 
 | 3481 | 		if (ap->ability_match != 0 && ap->rxconfig != 0) { | 
 | 3482 | 			ap->state = ANEG_STATE_ACK_DETECT_INIT; | 
 | 3483 | 		} | 
 | 3484 | 		break; | 
 | 3485 |  | 
 | 3486 | 	case ANEG_STATE_ACK_DETECT_INIT: | 
 | 3487 | 		ap->txconfig |= ANEG_CFG_ACK; | 
 | 3488 | 		tw32(MAC_TX_AUTO_NEG, ap->txconfig); | 
 | 3489 | 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS; | 
 | 3490 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3491 | 		udelay(40); | 
 | 3492 |  | 
 | 3493 | 		ap->state = ANEG_STATE_ACK_DETECT; | 
 | 3494 |  | 
 | 3495 | 		/* fallthru */ | 
 | 3496 | 	case ANEG_STATE_ACK_DETECT: | 
 | 3497 | 		if (ap->ack_match != 0) { | 
 | 3498 | 			if ((ap->rxconfig & ~ANEG_CFG_ACK) == | 
 | 3499 | 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { | 
 | 3500 | 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT; | 
 | 3501 | 			} else { | 
 | 3502 | 				ap->state = ANEG_STATE_AN_ENABLE; | 
 | 3503 | 			} | 
 | 3504 | 		} else if (ap->ability_match != 0 && | 
 | 3505 | 			   ap->rxconfig == 0) { | 
 | 3506 | 			ap->state = ANEG_STATE_AN_ENABLE; | 
 | 3507 | 		} | 
 | 3508 | 		break; | 
 | 3509 |  | 
 | 3510 | 	case ANEG_STATE_COMPLETE_ACK_INIT: | 
 | 3511 | 		if (ap->rxconfig & ANEG_CFG_INVAL) { | 
 | 3512 | 			ret = ANEG_FAILED; | 
 | 3513 | 			break; | 
 | 3514 | 		} | 
 | 3515 | 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | | 
 | 3516 | 			       MR_LP_ADV_HALF_DUPLEX | | 
 | 3517 | 			       MR_LP_ADV_SYM_PAUSE | | 
 | 3518 | 			       MR_LP_ADV_ASYM_PAUSE | | 
 | 3519 | 			       MR_LP_ADV_REMOTE_FAULT1 | | 
 | 3520 | 			       MR_LP_ADV_REMOTE_FAULT2 | | 
 | 3521 | 			       MR_LP_ADV_NEXT_PAGE | | 
 | 3522 | 			       MR_TOGGLE_RX | | 
 | 3523 | 			       MR_NP_RX); | 
 | 3524 | 		if (ap->rxconfig & ANEG_CFG_FD) | 
 | 3525 | 			ap->flags |= MR_LP_ADV_FULL_DUPLEX; | 
 | 3526 | 		if (ap->rxconfig & ANEG_CFG_HD) | 
 | 3527 | 			ap->flags |= MR_LP_ADV_HALF_DUPLEX; | 
 | 3528 | 		if (ap->rxconfig & ANEG_CFG_PS1) | 
 | 3529 | 			ap->flags |= MR_LP_ADV_SYM_PAUSE; | 
 | 3530 | 		if (ap->rxconfig & ANEG_CFG_PS2) | 
 | 3531 | 			ap->flags |= MR_LP_ADV_ASYM_PAUSE; | 
 | 3532 | 		if (ap->rxconfig & ANEG_CFG_RF1) | 
 | 3533 | 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1; | 
 | 3534 | 		if (ap->rxconfig & ANEG_CFG_RF2) | 
 | 3535 | 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2; | 
 | 3536 | 		if (ap->rxconfig & ANEG_CFG_NP) | 
 | 3537 | 			ap->flags |= MR_LP_ADV_NEXT_PAGE; | 
 | 3538 |  | 
 | 3539 | 		ap->link_time = ap->cur_time; | 
 | 3540 |  | 
 | 3541 | 		ap->flags ^= (MR_TOGGLE_TX); | 
 | 3542 | 		if (ap->rxconfig & 0x0008) | 
 | 3543 | 			ap->flags |= MR_TOGGLE_RX; | 
 | 3544 | 		if (ap->rxconfig & ANEG_CFG_NP) | 
 | 3545 | 			ap->flags |= MR_NP_RX; | 
 | 3546 | 		ap->flags |= MR_PAGE_RX; | 
 | 3547 |  | 
 | 3548 | 		ap->state = ANEG_STATE_COMPLETE_ACK; | 
 | 3549 | 		ret = ANEG_TIMER_ENAB; | 
 | 3550 | 		break; | 
 | 3551 |  | 
 | 3552 | 	case ANEG_STATE_COMPLETE_ACK: | 
 | 3553 | 		if (ap->ability_match != 0 && | 
 | 3554 | 		    ap->rxconfig == 0) { | 
 | 3555 | 			ap->state = ANEG_STATE_AN_ENABLE; | 
 | 3556 | 			break; | 
 | 3557 | 		} | 
 | 3558 | 		delta = ap->cur_time - ap->link_time; | 
 | 3559 | 		if (delta > ANEG_STATE_SETTLE_TIME) { | 
 | 3560 | 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { | 
 | 3561 | 				ap->state = ANEG_STATE_IDLE_DETECT_INIT; | 
 | 3562 | 			} else { | 
 | 3563 | 				if ((ap->txconfig & ANEG_CFG_NP) == 0 && | 
 | 3564 | 				    !(ap->flags & MR_NP_RX)) { | 
 | 3565 | 					ap->state = ANEG_STATE_IDLE_DETECT_INIT; | 
 | 3566 | 				} else { | 
 | 3567 | 					ret = ANEG_FAILED; | 
 | 3568 | 				} | 
 | 3569 | 			} | 
 | 3570 | 		} | 
 | 3571 | 		break; | 
 | 3572 |  | 
 | 3573 | 	case ANEG_STATE_IDLE_DETECT_INIT: | 
 | 3574 | 		ap->link_time = ap->cur_time; | 
 | 3575 | 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; | 
 | 3576 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3577 | 		udelay(40); | 
 | 3578 |  | 
 | 3579 | 		ap->state = ANEG_STATE_IDLE_DETECT; | 
 | 3580 | 		ret = ANEG_TIMER_ENAB; | 
 | 3581 | 		break; | 
 | 3582 |  | 
 | 3583 | 	case ANEG_STATE_IDLE_DETECT: | 
 | 3584 | 		if (ap->ability_match != 0 && | 
 | 3585 | 		    ap->rxconfig == 0) { | 
 | 3586 | 			ap->state = ANEG_STATE_AN_ENABLE; | 
 | 3587 | 			break; | 
 | 3588 | 		} | 
 | 3589 | 		delta = ap->cur_time - ap->link_time; | 
 | 3590 | 		if (delta > ANEG_STATE_SETTLE_TIME) { | 
 | 3591 | 			/* XXX another gem from the Broadcom driver :( */ | 
 | 3592 | 			ap->state = ANEG_STATE_LINK_OK; | 
 | 3593 | 		} | 
 | 3594 | 		break; | 
 | 3595 |  | 
 | 3596 | 	case ANEG_STATE_LINK_OK: | 
 | 3597 | 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); | 
 | 3598 | 		ret = ANEG_DONE; | 
 | 3599 | 		break; | 
 | 3600 |  | 
 | 3601 | 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT: | 
 | 3602 | 		/* ??? unimplemented */ | 
 | 3603 | 		break; | 
 | 3604 |  | 
 | 3605 | 	case ANEG_STATE_NEXT_PAGE_WAIT: | 
 | 3606 | 		/* ??? unimplemented */ | 
 | 3607 | 		break; | 
 | 3608 |  | 
 | 3609 | 	default: | 
 | 3610 | 		ret = ANEG_FAILED; | 
 | 3611 | 		break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 3612 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3613 |  | 
 | 3614 | 	return ret; | 
 | 3615 | } | 
 | 3616 |  | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3617 | static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3618 | { | 
 | 3619 | 	int res = 0; | 
 | 3620 | 	struct tg3_fiber_aneginfo aninfo; | 
 | 3621 | 	int status = ANEG_FAILED; | 
 | 3622 | 	unsigned int tick; | 
 | 3623 | 	u32 tmp; | 
 | 3624 |  | 
 | 3625 | 	tw32_f(MAC_TX_AUTO_NEG, 0); | 
 | 3626 |  | 
 | 3627 | 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; | 
 | 3628 | 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); | 
 | 3629 | 	udelay(40); | 
 | 3630 |  | 
 | 3631 | 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); | 
 | 3632 | 	udelay(40); | 
 | 3633 |  | 
 | 3634 | 	memset(&aninfo, 0, sizeof(aninfo)); | 
 | 3635 | 	aninfo.flags |= MR_AN_ENABLE; | 
 | 3636 | 	aninfo.state = ANEG_STATE_UNKNOWN; | 
 | 3637 | 	aninfo.cur_time = 0; | 
 | 3638 | 	tick = 0; | 
 | 3639 | 	while (++tick < 195000) { | 
 | 3640 | 		status = tg3_fiber_aneg_smachine(tp, &aninfo); | 
 | 3641 | 		if (status == ANEG_DONE || status == ANEG_FAILED) | 
 | 3642 | 			break; | 
 | 3643 |  | 
 | 3644 | 		udelay(1); | 
 | 3645 | 	} | 
 | 3646 |  | 
 | 3647 | 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; | 
 | 3648 | 	tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3649 | 	udelay(40); | 
 | 3650 |  | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3651 | 	*txflags = aninfo.txconfig; | 
 | 3652 | 	*rxflags = aninfo.flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3653 |  | 
 | 3654 | 	if (status == ANEG_DONE && | 
 | 3655 | 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | | 
 | 3656 | 			     MR_LP_ADV_FULL_DUPLEX))) | 
 | 3657 | 		res = 1; | 
 | 3658 |  | 
 | 3659 | 	return res; | 
 | 3660 | } | 
 | 3661 |  | 
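/* fiber_autoneg() above drives the clause 37 state machine by polling it
 * with a 1 us delay per iteration; the 195000-tick bound therefore gives
 * the exchange on the order of 195 ms to settle.  The result is 1 only if
 * the machine reached ANEG_DONE and at least one of MR_AN_COMPLETE,
 * MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX ended up set in aninfo.flags.
 */
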
 | 3662 | static void tg3_init_bcm8002(struct tg3 *tp) | 
 | 3663 | { | 
 | 3664 | 	u32 mac_status = tr32(MAC_STATUS); | 
 | 3665 | 	int i; | 
 | 3666 |  | 
 | 3667 | 	/* Reset when initializing the first time or when we have a link. */ | 
 | 3668 | 	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) && | 
 | 3669 | 	    !(mac_status & MAC_STATUS_PCS_SYNCED)) | 
 | 3670 | 		return; | 
 | 3671 |  | 
 | 3672 | 	/* Set PLL lock range. */ | 
 | 3673 | 	tg3_writephy(tp, 0x16, 0x8007); | 
 | 3674 |  | 
 | 3675 | 	/* SW reset */ | 
 | 3676 | 	tg3_writephy(tp, MII_BMCR, BMCR_RESET); | 
 | 3677 |  | 
 | 3678 | 	/* Wait for reset to complete. */ | 
 | 3679 | 	/* XXX schedule_timeout() ... */ | 
 | 3680 | 	for (i = 0; i < 500; i++) | 
 | 3681 | 		udelay(10); | 
 | 3682 |  | 
 | 3683 | 	/* Config mode; select PMA/Ch 1 regs. */ | 
 | 3684 | 	tg3_writephy(tp, 0x10, 0x8411); | 
 | 3685 |  | 
 | 3686 | 	/* Enable auto-lock and comdet, select txclk for tx. */ | 
 | 3687 | 	tg3_writephy(tp, 0x11, 0x0a10); | 
 | 3688 |  | 
 | 3689 | 	tg3_writephy(tp, 0x18, 0x00a0); | 
 | 3690 | 	tg3_writephy(tp, 0x16, 0x41ff); | 
 | 3691 |  | 
 | 3692 | 	/* Assert and deassert POR. */ | 
 | 3693 | 	tg3_writephy(tp, 0x13, 0x0400); | 
 | 3694 | 	udelay(40); | 
 | 3695 | 	tg3_writephy(tp, 0x13, 0x0000); | 
 | 3696 |  | 
 | 3697 | 	tg3_writephy(tp, 0x11, 0x0a50); | 
 | 3698 | 	udelay(40); | 
 | 3699 | 	tg3_writephy(tp, 0x11, 0x0a10); | 
 | 3700 |  | 
 | 3701 | 	/* Wait for signal to stabilize */ | 
 | 3702 | 	/* XXX schedule_timeout() ... */ | 
 | 3703 | 	for (i = 0; i < 15000; i++) | 
 | 3704 | 		udelay(10); | 
 | 3705 |  | 
 | 3706 | 	/* Deselect the channel register so we can read the PHYID | 
 | 3707 | 	 * later. | 
 | 3708 | 	 */ | 
 | 3709 | 	tg3_writephy(tp, 0x10, 0x8011); | 
 | 3710 | } | 
 | 3711 |  | 
 | 3712 | static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) | 
 | 3713 | { | 
| Matt Carlson | 82cd3d1 | 2007-12-20 20:09:00 -0800 | [diff] [blame] | 3714 | 	u16 flowctrl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3715 | 	u32 sg_dig_ctrl, sg_dig_status; | 
 | 3716 | 	u32 serdes_cfg, expected_sg_dig_ctrl; | 
 | 3717 | 	int workaround, port_a; | 
 | 3718 | 	int current_link_up; | 
 | 3719 |  | 
 | 3720 | 	serdes_cfg = 0; | 
 | 3721 | 	expected_sg_dig_ctrl = 0; | 
 | 3722 | 	workaround = 0; | 
 | 3723 | 	port_a = 1; | 
 | 3724 | 	current_link_up = 0; | 
 | 3725 |  | 
 | 3726 | 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && | 
 | 3727 | 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { | 
 | 3728 | 		workaround = 1; | 
 | 3729 | 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) | 
 | 3730 | 			port_a = 0; | 
 | 3731 |  | 
 | 3732 | 		/* preserve bits 0-11,13,14 for signal pre-emphasis */ | 
 | 3733 | 		/* preserve bits 20-23 for voltage regulator */ | 
 | 3734 | 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; | 
 | 3735 | 	} | 
 | 3736 |  | 
 | 3737 | 	sg_dig_ctrl = tr32(SG_DIG_CTRL); | 
 | 3738 |  | 
 | 3739 | 	if (tp->link_config.autoneg != AUTONEG_ENABLE) { | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3740 | 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3741 | 			if (workaround) { | 
 | 3742 | 				u32 val = serdes_cfg; | 
 | 3743 |  | 
 | 3744 | 				if (port_a) | 
 | 3745 | 					val |= 0xc010000; | 
 | 3746 | 				else | 
 | 3747 | 					val |= 0x4010000; | 
 | 3748 | 				tw32_f(MAC_SERDES_CFG, val); | 
 | 3749 | 			} | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3750 |  | 
 | 3751 | 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3752 | 		} | 
 | 3753 | 		if (mac_status & MAC_STATUS_PCS_SYNCED) { | 
 | 3754 | 			tg3_setup_flow_control(tp, 0, 0); | 
 | 3755 | 			current_link_up = 1; | 
 | 3756 | 		} | 
 | 3757 | 		goto out; | 
 | 3758 | 	} | 
 | 3759 |  | 
 | 3760 | 	/* Want auto-negotiation.  */ | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3761 | 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3762 |  | 
| Matt Carlson | 82cd3d1 | 2007-12-20 20:09:00 -0800 | [diff] [blame] | 3763 | 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); | 
 | 3764 | 	if (flowctrl & ADVERTISE_1000XPAUSE) | 
 | 3765 | 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; | 
 | 3766 | 	if (flowctrl & ADVERTISE_1000XPSE_ASYM) | 
 | 3767 | 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3768 |  | 
 | 3769 | 	if (sg_dig_ctrl != expected_sg_dig_ctrl) { | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3770 | 		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) && | 
 | 3771 | 		    tp->serdes_counter && | 
 | 3772 | 		    ((mac_status & (MAC_STATUS_PCS_SYNCED | | 
 | 3773 | 				    MAC_STATUS_RCVD_CFG)) == | 
 | 3774 | 		     MAC_STATUS_PCS_SYNCED)) { | 
 | 3775 | 			tp->serdes_counter--; | 
 | 3776 | 			current_link_up = 1; | 
 | 3777 | 			goto out; | 
 | 3778 | 		} | 
 | 3779 | restart_autoneg: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3780 | 		if (workaround) | 
 | 3781 | 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3782 | 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3783 | 		udelay(5); | 
 | 3784 | 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); | 
 | 3785 |  | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3786 | 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; | 
 | 3787 | 		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3788 | 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED | | 
 | 3789 | 				 MAC_STATUS_SIGNAL_DET)) { | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3790 | 		sg_dig_status = tr32(SG_DIG_STATUS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3791 | 		mac_status = tr32(MAC_STATUS); | 
 | 3792 |  | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3793 | 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3794 | 		    (mac_status & MAC_STATUS_PCS_SYNCED)) { | 
| Matt Carlson | 82cd3d1 | 2007-12-20 20:09:00 -0800 | [diff] [blame] | 3795 | 			u32 local_adv = 0, remote_adv = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3796 |  | 
| Matt Carlson | 82cd3d1 | 2007-12-20 20:09:00 -0800 | [diff] [blame] | 3797 | 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) | 
 | 3798 | 				local_adv |= ADVERTISE_1000XPAUSE; | 
 | 3799 | 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) | 
 | 3800 | 				local_adv |= ADVERTISE_1000XPSE_ASYM; | 
 | 3801 |  | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3802 | 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) | 
| Matt Carlson | 82cd3d1 | 2007-12-20 20:09:00 -0800 | [diff] [blame] | 3803 | 				remote_adv |= LPA_1000XPAUSE; | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3804 | 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) | 
| Matt Carlson | 82cd3d1 | 2007-12-20 20:09:00 -0800 | [diff] [blame] | 3805 | 				remote_adv |= LPA_1000XPAUSE_ASYM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3806 |  | 
 | 3807 | 			tg3_setup_flow_control(tp, local_adv, remote_adv); | 
 | 3808 | 			current_link_up = 1; | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3809 | 			tp->serdes_counter = 0; | 
 | 3810 | 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3811 | 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3812 | 			if (tp->serdes_counter) | 
 | 3813 | 				tp->serdes_counter--; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3814 | 			else { | 
 | 3815 | 				if (workaround) { | 
 | 3816 | 					u32 val = serdes_cfg; | 
 | 3817 |  | 
 | 3818 | 					if (port_a) | 
 | 3819 | 						val |= 0xc010000; | 
 | 3820 | 					else | 
 | 3821 | 						val |= 0x4010000; | 
 | 3822 |  | 
 | 3823 | 					tw32_f(MAC_SERDES_CFG, val); | 
 | 3824 | 				} | 
 | 3825 |  | 
| Matt Carlson | c98f6e3 | 2007-12-20 20:08:32 -0800 | [diff] [blame] | 3826 | 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3827 | 				udelay(40); | 
 | 3828 |  | 
 | 3829 | 				/* Link parallel detection - link is up | 
 | 3830 | 				 * only if we have PCS_SYNC and not | 
 | 3831 | 				 * receiving config code words. */ | 
 | 3832 | 				mac_status = tr32(MAC_STATUS); | 
 | 3833 | 				if ((mac_status & MAC_STATUS_PCS_SYNCED) && | 
 | 3834 | 				    !(mac_status & MAC_STATUS_RCVD_CFG)) { | 
 | 3835 | 					tg3_setup_flow_control(tp, 0, 0); | 
 | 3836 | 					current_link_up = 1; | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3837 | 					tp->tg3_flags2 |= | 
 | 3838 | 						TG3_FLG2_PARALLEL_DETECT; | 
 | 3839 | 					tp->serdes_counter = | 
 | 3840 | 						SERDES_PARALLEL_DET_TIMEOUT; | 
 | 3841 | 				} else | 
 | 3842 | 					goto restart_autoneg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3843 | 			} | 
 | 3844 | 		} | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3845 | 	} else { | 
 | 3846 | 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; | 
 | 3847 | 		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3848 | 	} | 
 | 3849 |  | 
 | 3850 | out: | 
 | 3851 | 	return current_link_up; | 
 | 3852 | } | 
 | 3853 |  | 
 | 3854 | static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) | 
 | 3855 | { | 
 | 3856 | 	int current_link_up = 0; | 
 | 3857 |  | 
| Michael Chan | 5cf64b8 | 2007-05-05 12:11:21 -0700 | [diff] [blame] | 3858 | 	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3859 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3860 |  | 
 | 3861 | 	if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3862 | 		u32 txflags, rxflags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3863 | 		int i; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 3864 |  | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3865 | 		if (fiber_autoneg(tp, &txflags, &rxflags)) { | 
 | 3866 | 			u32 local_adv = 0, remote_adv = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3867 |  | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3868 | 			if (txflags & ANEG_CFG_PS1) | 
 | 3869 | 				local_adv |= ADVERTISE_1000XPAUSE; | 
 | 3870 | 			if (txflags & ANEG_CFG_PS2) | 
 | 3871 | 				local_adv |= ADVERTISE_1000XPSE_ASYM; | 
 | 3872 |  | 
 | 3873 | 			if (rxflags & MR_LP_ADV_SYM_PAUSE) | 
 | 3874 | 				remote_adv |= LPA_1000XPAUSE; | 
 | 3875 | 			if (rxflags & MR_LP_ADV_ASYM_PAUSE) | 
 | 3876 | 				remote_adv |= LPA_1000XPAUSE_ASYM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3877 |  | 
 | 3878 | 			tg3_setup_flow_control(tp, local_adv, remote_adv); | 
 | 3879 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3880 | 			current_link_up = 1; | 
 | 3881 | 		} | 
 | 3882 | 		for (i = 0; i < 30; i++) { | 
 | 3883 | 			udelay(20); | 
 | 3884 | 			tw32_f(MAC_STATUS, | 
 | 3885 | 			       (MAC_STATUS_SYNC_CHANGED | | 
 | 3886 | 				MAC_STATUS_CFG_CHANGED)); | 
 | 3887 | 			udelay(40); | 
 | 3888 | 			if ((tr32(MAC_STATUS) & | 
 | 3889 | 			     (MAC_STATUS_SYNC_CHANGED | | 
 | 3890 | 			      MAC_STATUS_CFG_CHANGED)) == 0) | 
 | 3891 | 				break; | 
 | 3892 | 		} | 
 | 3893 |  | 
 | 3894 | 		mac_status = tr32(MAC_STATUS); | 
 | 3895 | 		if (current_link_up == 0 && | 
 | 3896 | 		    (mac_status & MAC_STATUS_PCS_SYNCED) && | 
 | 3897 | 		    !(mac_status & MAC_STATUS_RCVD_CFG)) | 
 | 3898 | 			current_link_up = 1; | 
 | 3899 | 	} else { | 
| Matt Carlson | 5be73b4 | 2007-12-20 20:09:29 -0800 | [diff] [blame] | 3900 | 		tg3_setup_flow_control(tp, 0, 0); | 
 | 3901 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3902 | 		/* Forcing 1000FD link up. */ | 
 | 3903 | 		current_link_up = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3904 |  | 
 | 3905 | 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); | 
 | 3906 | 		udelay(40); | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 3907 |  | 
 | 3908 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3909 | 		udelay(40); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3910 | 	} | 
 | 3911 |  | 
 | 3912 | out: | 
 | 3913 | 	return current_link_up; | 
 | 3914 | } | 
 | 3915 |  | 
 | 3916 | static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) | 
 | 3917 | { | 
 | 3918 | 	u32 orig_pause_cfg; | 
 | 3919 | 	u16 orig_active_speed; | 
 | 3920 | 	u8 orig_active_duplex; | 
 | 3921 | 	u32 mac_status; | 
 | 3922 | 	int current_link_up; | 
 | 3923 | 	int i; | 
 | 3924 |  | 
| Matt Carlson | 8d01862 | 2007-12-20 20:05:44 -0800 | [diff] [blame] | 3925 | 	orig_pause_cfg = tp->link_config.active_flowctrl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3926 | 	orig_active_speed = tp->link_config.active_speed; | 
 | 3927 | 	orig_active_duplex = tp->link_config.active_duplex; | 
 | 3928 |  | 
 | 3929 | 	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) && | 
 | 3930 | 	    netif_carrier_ok(tp->dev) && | 
 | 3931 | 	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) { | 
 | 3932 | 		mac_status = tr32(MAC_STATUS); | 
 | 3933 | 		mac_status &= (MAC_STATUS_PCS_SYNCED | | 
 | 3934 | 			       MAC_STATUS_SIGNAL_DET | | 
 | 3935 | 			       MAC_STATUS_CFG_CHANGED | | 
 | 3936 | 			       MAC_STATUS_RCVD_CFG); | 
 | 3937 | 		if (mac_status == (MAC_STATUS_PCS_SYNCED | | 
 | 3938 | 				   MAC_STATUS_SIGNAL_DET)) { | 
 | 3939 | 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | | 
 | 3940 | 					    MAC_STATUS_CFG_CHANGED)); | 
 | 3941 | 			return 0; | 
 | 3942 | 		} | 
 | 3943 | 	} | 
 | 3944 |  | 
 | 3945 | 	tw32_f(MAC_TX_AUTO_NEG, 0); | 
 | 3946 |  | 
 | 3947 | 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); | 
 | 3948 | 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; | 
 | 3949 | 	tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3950 | 	udelay(40); | 
 | 3951 |  | 
 | 3952 | 	if (tp->phy_id == PHY_ID_BCM8002) | 
 | 3953 | 		tg3_init_bcm8002(tp); | 
 | 3954 |  | 
 | 3955 | 	/* Enable link change event even when serdes polling.  */ | 
 | 3956 | 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 
 | 3957 | 	udelay(40); | 
 | 3958 |  | 
 | 3959 | 	current_link_up = 0; | 
 | 3960 | 	mac_status = tr32(MAC_STATUS); | 
 | 3961 |  | 
 | 3962 | 	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) | 
 | 3963 | 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); | 
 | 3964 | 	else | 
 | 3965 | 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); | 
 | 3966 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 3967 | 	tp->napi[0].hw_status->status = | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3968 | 		(SD_STATUS_UPDATED | | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 3969 | 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3970 |  | 
 | 3971 | 	for (i = 0; i < 100; i++) { | 
 | 3972 | 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | | 
 | 3973 | 				    MAC_STATUS_CFG_CHANGED)); | 
 | 3974 | 		udelay(5); | 
 | 3975 | 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3976 | 					 MAC_STATUS_CFG_CHANGED | | 
 | 3977 | 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3978 | 			break; | 
 | 3979 | 	} | 
 | 3980 |  | 
 | 3981 | 	mac_status = tr32(MAC_STATUS); | 
 | 3982 | 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { | 
 | 3983 | 		current_link_up = 0; | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 3984 | 		if (tp->link_config.autoneg == AUTONEG_ENABLE && | 
 | 3985 | 		    tp->serdes_counter == 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3986 | 			tw32_f(MAC_MODE, (tp->mac_mode | | 
 | 3987 | 					  MAC_MODE_SEND_CONFIGS)); | 
 | 3988 | 			udelay(1); | 
 | 3989 | 			tw32_f(MAC_MODE, tp->mac_mode); | 
 | 3990 | 		} | 
 | 3991 | 	} | 
 | 3992 |  | 
 | 3993 | 	if (current_link_up == 1) { | 
 | 3994 | 		tp->link_config.active_speed = SPEED_1000; | 
 | 3995 | 		tp->link_config.active_duplex = DUPLEX_FULL; | 
 | 3996 | 		tw32(MAC_LED_CTRL, (tp->led_ctrl | | 
 | 3997 | 				    LED_CTRL_LNKLED_OVERRIDE | | 
 | 3998 | 				    LED_CTRL_1000MBPS_ON)); | 
 | 3999 | 	} else { | 
 | 4000 | 		tp->link_config.active_speed = SPEED_INVALID; | 
 | 4001 | 		tp->link_config.active_duplex = DUPLEX_INVALID; | 
 | 4002 | 		tw32(MAC_LED_CTRL, (tp->led_ctrl | | 
 | 4003 | 				    LED_CTRL_LNKLED_OVERRIDE | | 
 | 4004 | 				    LED_CTRL_TRAFFIC_OVERRIDE)); | 
 | 4005 | 	} | 
 | 4006 |  | 
 | 4007 | 	if (current_link_up != netif_carrier_ok(tp->dev)) { | 
 | 4008 | 		if (current_link_up) | 
 | 4009 | 			netif_carrier_on(tp->dev); | 
 | 4010 | 		else | 
 | 4011 | 			netif_carrier_off(tp->dev); | 
 | 4012 | 		tg3_link_report(tp); | 
 | 4013 | 	} else { | 
| Matt Carlson | 8d01862 | 2007-12-20 20:05:44 -0800 | [diff] [blame] | 4014 | 		u32 now_pause_cfg = tp->link_config.active_flowctrl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4015 | 		if (orig_pause_cfg != now_pause_cfg || | 
 | 4016 | 		    orig_active_speed != tp->link_config.active_speed || | 
 | 4017 | 		    orig_active_duplex != tp->link_config.active_duplex) | 
 | 4018 | 			tg3_link_report(tp); | 
 | 4019 | 	} | 
 | 4020 |  | 
 | 4021 | 	return 0; | 
 | 4022 | } | 
 | 4023 |  | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4024 | static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | 
 | 4025 | { | 
 | 4026 | 	int current_link_up, err = 0; | 
 | 4027 | 	u32 bmsr, bmcr; | 
 | 4028 | 	u16 current_speed; | 
 | 4029 | 	u8 current_duplex; | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 4030 | 	u32 local_adv, remote_adv; | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4031 |  | 
 | 4032 | 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
 | 4033 | 	tw32_f(MAC_MODE, tp->mac_mode); | 
 | 4034 | 	udelay(40); | 
 | 4035 |  | 
 | 4036 | 	tw32(MAC_EVENT, 0); | 
 | 4037 |  | 
 | 4038 | 	tw32_f(MAC_STATUS, | 
 | 4039 | 	     (MAC_STATUS_SYNC_CHANGED | | 
 | 4040 | 	      MAC_STATUS_CFG_CHANGED | | 
 | 4041 | 	      MAC_STATUS_MI_COMPLETION | | 
 | 4042 | 	      MAC_STATUS_LNKSTATE_CHANGED)); | 
 | 4043 | 	udelay(40); | 
 | 4044 |  | 
 | 4045 | 	if (force_reset) | 
 | 4046 | 		tg3_phy_reset(tp); | 
 | 4047 |  | 
 | 4048 | 	current_link_up = 0; | 
 | 4049 | 	current_speed = SPEED_INVALID; | 
 | 4050 | 	current_duplex = DUPLEX_INVALID; | 
 | 4051 |  | 
 | 4052 | 	err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 4053 | 	err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 
| Michael Chan | d4d2c55 | 2006-03-20 17:47:20 -0800 | [diff] [blame] | 4054 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 
 | 4055 | 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | 
 | 4056 | 			bmsr |= BMSR_LSTATUS; | 
 | 4057 | 		else | 
 | 4058 | 			bmsr &= ~BMSR_LSTATUS; | 
 | 4059 | 	} | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4060 |  | 
 | 4061 | 	err |= tg3_readphy(tp, MII_BMCR, &bmcr); | 
 | 4062 |  | 
 | 4063 | 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && | 
| Matt Carlson | 2bd3ed0 | 2008-06-09 15:39:55 -0700 | [diff] [blame] | 4064 | 	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4065 | 		/* do nothing, just check for link up at the end */ | 
 | 4066 | 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) { | 
 | 4067 | 		u32 adv, new_adv; | 
 | 4068 |  | 
 | 4069 | 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv); | 
 | 4070 | 		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | | 
 | 4071 | 				  ADVERTISE_1000XPAUSE | | 
 | 4072 | 				  ADVERTISE_1000XPSE_ASYM | | 
 | 4073 | 				  ADVERTISE_SLCT); | 
 | 4074 |  | 
| Matt Carlson | ba4d07a | 2007-12-20 20:08:00 -0800 | [diff] [blame] | 4075 | 		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4076 |  | 
 | 4077 | 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) | 
 | 4078 | 			new_adv |= ADVERTISE_1000XHALF; | 
 | 4079 | 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) | 
 | 4080 | 			new_adv |= ADVERTISE_1000XFULL; | 
 | 4081 |  | 
 | 4082 | 		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { | 
 | 4083 | 			tg3_writephy(tp, MII_ADVERTISE, new_adv); | 
 | 4084 | 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; | 
 | 4085 | 			tg3_writephy(tp, MII_BMCR, bmcr); | 
 | 4086 |  | 
 | 4087 | 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 4088 | 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4089 | 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
 | 4090 |  | 
 | 4091 | 			return err; | 
 | 4092 | 		} | 
 | 4093 | 	} else { | 
 | 4094 | 		u32 new_bmcr; | 
 | 4095 |  | 
 | 4096 | 		bmcr &= ~BMCR_SPEED1000; | 
 | 4097 | 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); | 
 | 4098 |  | 
 | 4099 | 		if (tp->link_config.duplex == DUPLEX_FULL) | 
 | 4100 | 			new_bmcr |= BMCR_FULLDPLX; | 
 | 4101 |  | 
 | 4102 | 		if (new_bmcr != bmcr) { | 
 | 4103 | 			/* BMCR_SPEED1000 is a reserved bit that needs | 
 | 4104 | 			 * to be set on write. | 
 | 4105 | 			 */ | 
 | 4106 | 			new_bmcr |= BMCR_SPEED1000; | 
 | 4107 |  | 
 | 4108 | 			/* Force a linkdown */ | 
 | 4109 | 			if (netif_carrier_ok(tp->dev)) { | 
 | 4110 | 				u32 adv; | 
 | 4111 |  | 
 | 4112 | 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv); | 
 | 4113 | 				adv &= ~(ADVERTISE_1000XFULL | | 
 | 4114 | 					 ADVERTISE_1000XHALF | | 
 | 4115 | 					 ADVERTISE_SLCT); | 
 | 4116 | 				tg3_writephy(tp, MII_ADVERTISE, adv); | 
 | 4117 | 				tg3_writephy(tp, MII_BMCR, bmcr | | 
 | 4118 | 							   BMCR_ANRESTART | | 
 | 4119 | 							   BMCR_ANENABLE); | 
 | 4120 | 				udelay(10); | 
 | 4121 | 				netif_carrier_off(tp->dev); | 
 | 4122 | 			} | 
 | 4123 | 			tg3_writephy(tp, MII_BMCR, new_bmcr); | 
 | 4124 | 			bmcr = new_bmcr; | 
 | 4125 | 			err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 4126 | 			err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 
| Michael Chan | d4d2c55 | 2006-03-20 17:47:20 -0800 | [diff] [blame] | 4127 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 
 | 4128 | 			    ASIC_REV_5714) { | 
 | 4129 | 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | 
 | 4130 | 					bmsr |= BMSR_LSTATUS; | 
 | 4131 | 				else | 
 | 4132 | 					bmsr &= ~BMSR_LSTATUS; | 
 | 4133 | 			} | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4134 | 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
 | 4135 | 		} | 
 | 4136 | 	} | 
 | 4137 |  | 
 | 4138 | 	if (bmsr & BMSR_LSTATUS) { | 
 | 4139 | 		current_speed = SPEED_1000; | 
 | 4140 | 		current_link_up = 1; | 
 | 4141 | 		if (bmcr & BMCR_FULLDPLX) | 
 | 4142 | 			current_duplex = DUPLEX_FULL; | 
 | 4143 | 		else | 
 | 4144 | 			current_duplex = DUPLEX_HALF; | 
 | 4145 |  | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 4146 | 		local_adv = 0; | 
 | 4147 | 		remote_adv = 0; | 
 | 4148 |  | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4149 | 		if (bmcr & BMCR_ANENABLE) { | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 4150 | 			u32 common; | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4151 |  | 
 | 4152 | 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); | 
 | 4153 | 			err |= tg3_readphy(tp, MII_LPA, &remote_adv); | 
 | 4154 | 			common = local_adv & remote_adv; | 
 | 4155 | 			if (common & (ADVERTISE_1000XHALF | | 
 | 4156 | 				      ADVERTISE_1000XFULL)) { | 
 | 4157 | 				if (common & ADVERTISE_1000XFULL) | 
 | 4158 | 					current_duplex = DUPLEX_FULL; | 
 | 4159 | 				else | 
 | 4160 | 					current_duplex = DUPLEX_HALF; | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4161 | 			} else | 
 | 4162 | 				current_link_up = 0; | 
 | 4164 | 		} | 
 | 4165 | 	} | 
 | 4166 |  | 
| Matt Carlson | ef167e2 | 2007-12-20 20:10:01 -0800 | [diff] [blame] | 4167 | 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL) | 
 | 4168 | 		tg3_setup_flow_control(tp, local_adv, remote_adv); | 
 | 4169 |  | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4170 | 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; | 
 | 4171 | 	if (tp->link_config.active_duplex == DUPLEX_HALF) | 
 | 4172 | 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX; | 
 | 4173 |  | 
 | 4174 | 	tw32_f(MAC_MODE, tp->mac_mode); | 
 | 4175 | 	udelay(40); | 
 | 4176 |  | 
 | 4177 | 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); | 
 | 4178 |  | 
 | 4179 | 	tp->link_config.active_speed = current_speed; | 
 | 4180 | 	tp->link_config.active_duplex = current_duplex; | 
 | 4181 |  | 
 | 4182 | 	if (current_link_up != netif_carrier_ok(tp->dev)) { | 
 | 4183 | 		if (current_link_up) | 
 | 4184 | 			netif_carrier_on(tp->dev); | 
 | 4185 | 		else { | 
 | 4186 | 			netif_carrier_off(tp->dev); | 
 | 4187 | 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
 | 4188 | 		} | 
 | 4189 | 		tg3_link_report(tp); | 
 | 4190 | 	} | 
 | 4191 | 	return err; | 
 | 4192 | } | 
 | 4193 |  | 
 | 4194 | static void tg3_serdes_parallel_detect(struct tg3 *tp) | 
 | 4195 | { | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 4196 | 	if (tp->serdes_counter) { | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4197 | 		/* Give autoneg time to complete. */ | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 4198 | 		tp->serdes_counter--; | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4199 | 		return; | 
 | 4200 | 	} | 
 | 4201 | 	if (!netif_carrier_ok(tp->dev) && | 
 | 4202 | 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) { | 
 | 4203 | 		u32 bmcr; | 
 | 4204 |  | 
 | 4205 | 		tg3_readphy(tp, MII_BMCR, &bmcr); | 
 | 4206 | 		if (bmcr & BMCR_ANENABLE) { | 
 | 4207 | 			u32 phy1, phy2; | 
 | 4208 |  | 
 | 4209 | 			/* Select shadow register 0x1f */ | 
 | 4210 | 			tg3_writephy(tp, 0x1c, 0x7c00); | 
 | 4211 | 			tg3_readphy(tp, 0x1c, &phy1); | 
 | 4212 |  | 
 | 4213 | 			/* Select expansion interrupt status register */ | 
 | 4214 | 			tg3_writephy(tp, 0x17, 0x0f01); | 
 | 4215 | 			tg3_readphy(tp, 0x15, &phy2); | 
 | 4216 | 			tg3_readphy(tp, 0x15, &phy2); | 
 | 4217 |  | 
 | 4218 | 			if ((phy1 & 0x10) && !(phy2 & 0x20)) { | 
 | 4219 | 				/* We have signal detect and not receiving | 
 | 4220 | 				 * config code words, link is up by parallel | 
 | 4221 | 				 * detection. | 
 | 4222 | 				 */ | 
 | 4223 |  | 
 | 4224 | 				bmcr &= ~BMCR_ANENABLE; | 
 | 4225 | 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; | 
 | 4226 | 				tg3_writephy(tp, MII_BMCR, bmcr); | 
 | 4227 | 				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; | 
 | 4228 | 			} | 
 | 4229 | 		} | 
 | 4230 | 	} else if (netif_carrier_ok(tp->dev) && | 
 | 4232 | 		   (tp->link_config.autoneg == AUTONEG_ENABLE) && | 
 | 4233 | 		   (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) { | 
 | 4234 | 		u32 phy2; | 
 | 4235 |  | 
 | 4236 | 		/* Select expansion interrupt status register */ | 
 | 4237 | 		tg3_writephy(tp, 0x17, 0x0f01); | 
 | 4238 | 		tg3_readphy(tp, 0x15, &phy2); | 
 | 4239 | 		if (phy2 & 0x20) { | 
 | 4240 | 			u32 bmcr; | 
 | 4241 |  | 
 | 4242 | 			/* Config code words received, turn on autoneg. */ | 
 | 4243 | 			tg3_readphy(tp, MII_BMCR, &bmcr); | 
 | 4244 | 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); | 
 | 4245 |  | 
 | 4246 | 			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
 | 4247 |  | 
 | 4248 | 		} | 
 | 4249 | 	} | 
 | 4250 | } | 
 | 4251 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4252 | static int tg3_setup_phy(struct tg3 *tp, int force_reset) | 
 | 4253 | { | 
 | 4254 | 	int err; | 
 | 4255 |  | 
 | 4256 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 
 | 4257 | 		err = tg3_setup_fiber_phy(tp, force_reset); | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 4258 | 	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 
 | 4259 | 		err = tg3_setup_fiber_mii_phy(tp, force_reset); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4260 | 	} else { | 
 | 4261 | 		err = tg3_setup_copper_phy(tp, force_reset); | 
 | 4262 | 	} | 
 | 4263 |  | 
| Matt Carlson | bcb37f6 | 2008-11-03 16:52:09 -0800 | [diff] [blame] | 4264 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { | 
| Matt Carlson | aa6c91f | 2007-11-12 21:18:04 -0800 | [diff] [blame] | 4265 | 		u32 val, scale; | 
 | 4266 |  | 
 | 4267 | 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; | 
 | 4268 | 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) | 
 | 4269 | 			scale = 65; | 
 | 4270 | 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) | 
 | 4271 | 			scale = 6; | 
 | 4272 | 		else | 
 | 4273 | 			scale = 12; | 
 | 4274 |  | 
 | 4275 | 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; | 
 | 4276 | 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); | 
 | 4277 | 		tw32(GRC_MISC_CFG, val); | 
 | 4278 | 	} | 
 | 4279 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4280 | 	if (tp->link_config.active_speed == SPEED_1000 && | 
 | 4281 | 	    tp->link_config.active_duplex == DUPLEX_HALF) | 
 | 4282 | 		tw32(MAC_TX_LENGTHS, | 
 | 4283 | 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 
 | 4284 | 		      (6 << TX_LENGTHS_IPG_SHIFT) | | 
 | 4285 | 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); | 
 | 4286 | 	else | 
 | 4287 | 		tw32(MAC_TX_LENGTHS, | 
 | 4288 | 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 
 | 4289 | 		      (6 << TX_LENGTHS_IPG_SHIFT) | | 
 | 4290 | 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); | 
 | 4291 |  | 
 | 4292 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 4293 | 		if (netif_carrier_ok(tp->dev)) { | 
 | 4294 | 			tw32(HOSTCC_STAT_COAL_TICKS, | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 4295 | 			     tp->coal.stats_block_coalesce_usecs); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4296 | 		} else { | 
 | 4297 | 			tw32(HOSTCC_STAT_COAL_TICKS, 0); | 
 | 4298 | 		} | 
 | 4299 | 	} | 
 | 4300 |  | 
| Matt Carlson | 8ed5d97 | 2007-05-07 00:25:49 -0700 | [diff] [blame] | 4301 | 	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) { | 
 | 4302 | 		u32 val = tr32(PCIE_PWR_MGMT_THRESH); | 
 | 4303 | 		if (!netif_carrier_ok(tp->dev)) | 
 | 4304 | 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | | 
 | 4305 | 			      tp->pwrmgmt_thresh; | 
 | 4306 | 		else | 
 | 4307 | 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK; | 
 | 4308 | 		tw32(PCIE_PWR_MGMT_THRESH, val); | 
 | 4309 | 	} | 
 | 4310 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4311 | 	return err; | 
 | 4312 | } | 
 | 4313 |  | 
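/* A note on the MAC_TX_LENGTHS programming in tg3_setup_phy() above: the
 * larger slot-time value is used only for the 1000 Mb/s half-duplex case,
 * which is consistent with the extended 4096 bit-time slot that
 * half-duplex gigabit operation (carrier extension) requires, versus the
 * standard 512 bit-time slot used for the other speed/duplex settings.
 */
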
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4314 | /* This is called whenever we suspect that the system chipset is re- | 
 | 4315 |  * ordering the sequence of MMIO to the tx send mailbox. The symptom | 
 | 4316 |  * is bogus tx completions. We try to recover by setting the | 
 | 4317 |  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later | 
 | 4318 |  * in the workqueue. | 
 | 4319 |  */ | 
 | 4320 | static void tg3_tx_recover(struct tg3 *tp) | 
 | 4321 | { | 
 | 4322 | 	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || | 
 | 4323 | 	       tp->write32_tx_mbox == tg3_write_indirect_mbox); | 
 | 4324 |  | 
 | 4325 | 	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-" | 
 | 4326 | 	       "mapped I/O cycles to the network device, attempting to " | 
 | 4327 | 	       "recover. Please report the problem to the driver maintainer " | 
 | 4328 | 	       "and include system chipset information.\n", tp->dev->name); | 
 | 4329 |  | 
 | 4330 | 	spin_lock(&tp->lock); | 
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4331 | 	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; | 
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4332 | 	spin_unlock(&tp->lock); | 
 | 4333 | } | 
 | 4334 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4335 | static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) | 
| Michael Chan | 1b2a720 | 2006-08-07 21:46:02 -0700 | [diff] [blame] | 4336 | { | 
 | 4337 | 	smp_mb(); | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4338 | 	return tnapi->tx_pending - | 
 | 4339 | 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); | 
| Michael Chan | 1b2a720 | 2006-08-07 21:46:02 -0700 | [diff] [blame] | 4340 | } | 
 | 4341 |  | 
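/* tg3_tx_avail() above relies on the TX ring size being a power of two:
 * masking the producer/consumer difference with (ring size - 1) yields
 * the number of in-flight descriptors even when the producer index has
 * wrapped around past the consumer.  A worked example, assuming a
 * 512-entry ring with tx_pending set to 511:
 *
 *	tx_prod = 5, tx_cons = 510
 *	in flight  = (5 - 510) & 511 = 7	(slots 510, 511, 0..4)
 *	available  = 511 - 7 = 504
 */
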
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4342 | /* Tigon3 never reports partial packet sends.  So we do not | 
 | 4343 |  * need special logic to handle SKBs that have not had all | 
 | 4344 |  * of their frags sent yet, like SunGEM does. | 
 | 4345 |  */ | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4346 | static void tg3_tx(struct tg3_napi *tnapi) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4347 | { | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4348 | 	struct tg3 *tp = tnapi->tp; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 4349 | 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4350 | 	u32 sw_idx = tnapi->tx_cons; | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 4351 | 	struct netdev_queue *txq; | 
 | 4352 | 	int index = tnapi - tp->napi; | 
 | 4353 |  | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 4354 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 4355 | 		index--; | 
 | 4356 |  | 
 | 4357 | 	txq = netdev_get_tx_queue(tp->dev, index); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4358 |  | 
 | 4359 | 	while (sw_idx != hw_idx) { | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 4360 | 		struct ring_info *ri = &tnapi->tx_buffers[sw_idx]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4361 | 		struct sk_buff *skb = ri->skb; | 
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4362 | 		int i, tx_bug = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4363 |  | 
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4364 | 		if (unlikely(skb == NULL)) { | 
 | 4365 | 			tg3_tx_recover(tp); | 
 | 4366 | 			return; | 
 | 4367 | 		} | 
 | 4368 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 4369 | 		pci_unmap_single(tp->pdev, | 
 | 4370 | 				 pci_unmap_addr(ri, mapping), | 
 | 4371 | 				 skb_headlen(skb), | 
 | 4372 | 				 PCI_DMA_TODEVICE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4373 |  | 
 | 4374 | 		ri->skb = NULL; | 
 | 4375 |  | 
 | 4376 | 		sw_idx = NEXT_TX(sw_idx); | 
 | 4377 |  | 
 | 4378 | 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4379 | 			ri = &tnapi->tx_buffers[sw_idx]; | 
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4380 | 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) | 
 | 4381 | 				tx_bug = 1; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 4382 |  | 
 | 4383 | 			pci_unmap_page(tp->pdev, | 
 | 4384 | 				       pci_unmap_addr(ri, mapping), | 
 | 4385 | 				       skb_shinfo(skb)->frags[i].size, | 
 | 4386 | 				       PCI_DMA_TODEVICE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4387 | 			sw_idx = NEXT_TX(sw_idx); | 
 | 4388 | 		} | 
 | 4389 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 4390 | 		dev_kfree_skb(skb); | 
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 4391 |  | 
 | 4392 | 		if (unlikely(tx_bug)) { | 
 | 4393 | 			tg3_tx_recover(tp); | 
 | 4394 | 			return; | 
 | 4395 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4396 | 	} | 
 | 4397 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4398 | 	tnapi->tx_cons = sw_idx; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4399 |  | 
| Michael Chan | 1b2a720 | 2006-08-07 21:46:02 -0700 | [diff] [blame] | 4400 | 	/* Need to make the tx_cons update visible to tg3_start_xmit() | 
 | 4401 | 	 * before checking for netif_queue_stopped().  Without the | 
 | 4402 | 	 * memory barrier, there is a small possibility that tg3_start_xmit() | 
 | 4403 | 	 * will miss it and cause the queue to be stopped forever. | 
 | 4404 | 	 */ | 
 | 4405 | 	smp_mb(); | 
 | 4406 |  | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 4407 | 	if (unlikely(netif_tx_queue_stopped(txq) && | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4408 | 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 4409 | 		__netif_tx_lock(txq, smp_processor_id()); | 
 | 4410 | 		if (netif_tx_queue_stopped(txq) && | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4411 | 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 4412 | 			netif_tx_wake_queue(txq); | 
 | 4413 | 		__netif_tx_unlock(txq); | 
| Michael Chan | 51b9146 | 2005-09-01 17:41:28 -0700 | [diff] [blame] | 4414 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4415 | } | 
 | 4416 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 4417 | static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) | 
 | 4418 | { | 
 | 4419 | 	if (!ri->skb) | 
 | 4420 | 		return; | 
 | 4421 |  | 
 | 4422 | 	pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), | 
 | 4423 | 			 map_sz, PCI_DMA_FROMDEVICE); | 
 | 4424 | 	dev_kfree_skb_any(ri->skb); | 
 | 4425 | 	ri->skb = NULL; | 
 | 4426 | } | 
 | 4427 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4428 | /* Returns size of skb allocated or < 0 on error. | 
 | 4429 |  * | 
 | 4430 |  * We only need to fill in the address because the other members | 
 | 4431 |  * of the RX descriptor are invariant, see tg3_init_rings. | 
 | 4432 |  * | 
 | 4433 |  * Note the purposeful asymmetry of cpu vs. chip accesses.  For | 
 | 4434 |  * posting buffers we only dirty the first cache line of the RX | 
 | 4435 |  * descriptor (containing the address).  Whereas for the RX status | 
 | 4436 |  * buffers the cpu only reads the last cacheline of the RX descriptor | 
 | 4437 |  * (to fetch the error flags, vlan tag, checksum, and opaque cookie). | 
 | 4438 |  */ | 
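 |  | /* The descriptor's opaque field (see the RXD_OPAQUE_* masks used in | 
 |  |  * tg3_rx()) encodes both the producer ring type and the buffer index, | 
 |  |  * which is how a status ring entry is matched back to the ring_info | 
 |  |  * holding the skb and its DMA mapping. | 
 |  |  */ | 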
| Matt Carlson | 86b21e5 | 2009-11-13 13:03:45 +0000 | [diff] [blame] | 4439 | static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4440 | 			    u32 opaque_key, u32 dest_idx_unmasked) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4441 | { | 
 | 4442 | 	struct tg3_rx_buffer_desc *desc; | 
 | 4443 | 	struct ring_info *map, *src_map; | 
 | 4444 | 	struct sk_buff *skb; | 
 | 4445 | 	dma_addr_t mapping; | 
 | 4446 | 	int skb_size, dest_idx; | 
 | 4447 |  | 
 | 4448 | 	src_map = NULL; | 
 | 4449 | 	switch (opaque_key) { | 
 | 4450 | 	case RXD_OPAQUE_RING_STD: | 
 | 4451 | 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 4452 | 		desc = &tpr->rx_std[dest_idx]; | 
 | 4453 | 		map = &tpr->rx_std_buffers[dest_idx]; | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 4454 | 		skb_size = tp->rx_pkt_map_sz; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4455 | 		break; | 
 | 4456 |  | 
 | 4457 | 	case RXD_OPAQUE_RING_JUMBO: | 
 | 4458 | 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 
| Matt Carlson | 79ed5ac | 2009-08-28 14:00:55 +0000 | [diff] [blame] | 4459 | 		desc = &tpr->rx_jmb[dest_idx].std; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 4460 | 		map = &tpr->rx_jmb_buffers[dest_idx]; | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 4461 | 		skb_size = TG3_RX_JMB_MAP_SZ; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4462 | 		break; | 
 | 4463 |  | 
 | 4464 | 	default: | 
 | 4465 | 		return -EINVAL; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 4466 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4467 |  | 
 | 4468 | 	/* Do not overwrite any of the map or rp information | 
 | 4469 | 	 * until we are sure we can commit to a new buffer. | 
 | 4470 | 	 * | 
 | 4471 | 	 * Callers depend upon this behavior and assume that | 
 | 4472 | 	 * we leave everything unchanged if we fail. | 
 | 4473 | 	 */ | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 4474 | 	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4475 | 	if (skb == NULL) | 
 | 4476 | 		return -ENOMEM; | 
 | 4477 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4478 | 	skb_reserve(skb, tp->rx_offset); | 
 | 4479 |  | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 4480 | 	mapping = pci_map_single(tp->pdev, skb->data, skb_size, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4481 | 				 PCI_DMA_FROMDEVICE); | 
| Matt Carlson | a21771d | 2009-11-02 14:25:31 +0000 | [diff] [blame] | 4482 | 	if (pci_dma_mapping_error(tp->pdev, mapping)) { | 
 | 4483 | 		dev_kfree_skb(skb); | 
 | 4484 | 		return -EIO; | 
 | 4485 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4486 |  | 
 | 4487 | 	map->skb = skb; | 
 | 4488 | 	pci_unmap_addr_set(map, mapping, mapping); | 
 | 4489 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4490 | 	desc->addr_hi = ((u64)mapping >> 32); | 
 | 4491 | 	desc->addr_lo = ((u64)mapping & 0xffffffff); | 
 | 4492 |  | 
 | 4493 | 	return skb_size; | 
 | 4494 | } | 
 | 4495 |  | 
 | 4496 | /* We only need to move the address over because the other | 
 | 4497 |  * members of the RX descriptor are invariant.  See notes above | 
 | 4498 |  * tg3_alloc_rx_skb for full details. | 
 | 4499 |  */ | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4500 | static void tg3_recycle_rx(struct tg3_napi *tnapi, | 
 | 4501 | 			   struct tg3_rx_prodring_set *dpr, | 
 | 4502 | 			   u32 opaque_key, int src_idx, | 
 | 4503 | 			   u32 dest_idx_unmasked) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4504 | { | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4505 | 	struct tg3 *tp = tnapi->tp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4506 | 	struct tg3_rx_buffer_desc *src_desc, *dest_desc; | 
 | 4507 | 	struct ring_info *src_map, *dest_map; | 
 | 4508 | 	int dest_idx; | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4509 | 	struct tg3_rx_prodring_set *spr = &tp->prodring[0]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4510 |  | 
 | 4511 | 	switch (opaque_key) { | 
 | 4512 | 	case RXD_OPAQUE_RING_STD: | 
 | 4513 | 		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4514 | 		dest_desc = &dpr->rx_std[dest_idx]; | 
 | 4515 | 		dest_map = &dpr->rx_std_buffers[dest_idx]; | 
 | 4516 | 		src_desc = &spr->rx_std[src_idx]; | 
 | 4517 | 		src_map = &spr->rx_std_buffers[src_idx]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4518 | 		break; | 
 | 4519 |  | 
 | 4520 | 	case RXD_OPAQUE_RING_JUMBO: | 
 | 4521 | 		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4522 | 		dest_desc = &dpr->rx_jmb[dest_idx].std; | 
 | 4523 | 		dest_map = &dpr->rx_jmb_buffers[dest_idx]; | 
 | 4524 | 		src_desc = &spr->rx_jmb[src_idx].std; | 
 | 4525 | 		src_map = &spr->rx_jmb_buffers[src_idx]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4526 | 		break; | 
 | 4527 |  | 
 | 4528 | 	default: | 
 | 4529 | 		return; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 4530 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4531 |  | 
 | 4532 | 	dest_map->skb = src_map->skb; | 
 | 4533 | 	pci_unmap_addr_set(dest_map, mapping, | 
 | 4534 | 			   pci_unmap_addr(src_map, mapping)); | 
 | 4535 | 	dest_desc->addr_hi = src_desc->addr_hi; | 
 | 4536 | 	dest_desc->addr_lo = src_desc->addr_lo; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4537 | 	src_map->skb = NULL; | 
 | 4538 | } | 
 | 4539 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4540 | /* The RX ring scheme is composed of multiple rings which post fresh | 
 | 4541 |  * buffers to the chip, and one special ring the chip uses to report | 
 | 4542 |  * status back to the host. | 
 | 4543 |  * | 
 | 4544 |  * The special ring reports the status of received packets to the | 
 | 4545 |  * host.  The chip does not write into the original descriptor the | 
 | 4546 |  * RX buffer was obtained from.  The chip simply takes the original | 
 | 4547 |  * descriptor as provided by the host, updates the status and length | 
 | 4548 |  * field, then writes this into the next status ring entry. | 
 | 4549 |  * | 
 | 4550 |  * Each ring the host uses to post buffers to the chip is described | 
 | 4551 |  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives, | 
 | 4552 |  * it is first placed into the on-chip ram.  When the packet's length | 
 | 4553 |  * is known, the chip walks down the TG3_BDINFO entries to select the ring. | 
 | 4554 |  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO | 
 | 4555 |  * whose MAXLEN covers the new packet's length is chosen. | 
 | 4556 |  * | 
 | 4557 |  * The "separate ring for rx status" scheme may sound queer, but it makes | 
 | 4558 |  * sense from a cache coherency perspective.  If only the host writes | 
 | 4559 |  * to the buffer post rings, and only the chip writes to the rx status | 
 | 4560 |  * rings, then cache lines never move beyond shared-modified state. | 
 | 4561 |  * If both the host and chip were to write into the same ring, cache line | 
 | 4562 |  * eviction could occur since both entities want it in an exclusive state. | 
 | 4563 |  */ | 
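 |  | /* In tg3_rx() below, a packet longer than RX_COPY_THRESHOLD (when | 
 |  |  * rx_offset == NET_IP_ALIGN) keeps its original buffer and a fresh | 
 |  |  * replacement skb is allocated for the producer ring; shorter packets | 
 |  |  * are copied into a small new skb and the original buffer is recycled | 
 |  |  * back onto the producer ring via tg3_recycle_rx(). | 
 |  |  */ | 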
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4564 | static int tg3_rx(struct tg3_napi *tnapi, int budget) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4565 | { | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4566 | 	struct tg3 *tp = tnapi->tp; | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 4567 | 	u32 work_mask, rx_std_posted = 0; | 
| Matt Carlson | 4361935 | 2009-11-13 13:03:47 +0000 | [diff] [blame] | 4568 | 	u32 std_prod_idx, jmb_prod_idx; | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 4569 | 	u32 sw_idx = tnapi->rx_rcb_ptr; | 
| Michael Chan | 483ba50 | 2005-04-25 15:14:03 -0700 | [diff] [blame] | 4570 | 	u16 hw_idx; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4571 | 	int received; | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4572 | 	struct tg3_rx_prodring_set *tpr = tnapi->prodring; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4573 |  | 
| Matt Carlson | 8d9d7cf | 2009-09-01 13:19:05 +0000 | [diff] [blame] | 4574 | 	hw_idx = *(tnapi->rx_rcb_prod_idx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4575 | 	/* | 
 | 4576 | 	 * We need to order the read of hw_idx and the read of | 
 | 4577 | 	 * the opaque cookie. | 
 | 4578 | 	 */ | 
 | 4579 | 	rmb(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4580 | 	work_mask = 0; | 
 | 4581 | 	received = 0; | 
| Matt Carlson | 4361935 | 2009-11-13 13:03:47 +0000 | [diff] [blame] | 4582 | 	std_prod_idx = tpr->rx_std_prod_idx; | 
 | 4583 | 	jmb_prod_idx = tpr->rx_jmb_prod_idx; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4584 | 	while (sw_idx != hw_idx && budget > 0) { | 
| Matt Carlson | afc081f | 2009-11-13 13:03:43 +0000 | [diff] [blame] | 4585 | 		struct ring_info *ri; | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 4586 | 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4587 | 		unsigned int len; | 
 | 4588 | 		struct sk_buff *skb; | 
 | 4589 | 		dma_addr_t dma_addr; | 
 | 4590 | 		u32 opaque_key, desc_idx, *post_ptr; | 
 | 4591 |  | 
 | 4592 | 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 
 | 4593 | 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 
 | 4594 | 		if (opaque_key == RXD_OPAQUE_RING_STD) { | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4595 | 			ri = &tp->prodring[0].rx_std_buffers[desc_idx]; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 4596 | 			dma_addr = pci_unmap_addr(ri, mapping); | 
 | 4597 | 			skb = ri->skb; | 
| Matt Carlson | 4361935 | 2009-11-13 13:03:47 +0000 | [diff] [blame] | 4598 | 			post_ptr = &std_prod_idx; | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 4599 | 			rx_std_posted++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4600 | 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4601 | 			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 4602 | 			dma_addr = pci_unmap_addr(ri, mapping); | 
 | 4603 | 			skb = ri->skb; | 
| Matt Carlson | 4361935 | 2009-11-13 13:03:47 +0000 | [diff] [blame] | 4604 | 			post_ptr = &jmb_prod_idx; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 4605 | 		} else | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4606 | 			goto next_pkt_nopost; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4607 |  | 
 | 4608 | 		work_mask |= opaque_key; | 
 | 4609 |  | 
 | 4610 | 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 
 | 4611 | 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | 
 | 4612 | 		drop_it: | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4613 | 			tg3_recycle_rx(tnapi, tpr, opaque_key, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4614 | 				       desc_idx, *post_ptr); | 
 | 4615 | 		drop_it_no_recycle: | 
 | 4616 | 			/* Other statistics kept track of by card. */ | 
 | 4617 | 			tp->net_stats.rx_dropped++; | 
 | 4618 | 			goto next_pkt; | 
 | 4619 | 		} | 
 | 4620 |  | 
| Matt Carlson | ad82926 | 2008-11-21 17:16:16 -0800 | [diff] [blame] | 4621 | 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - | 
 | 4622 | 		      ETH_FCS_LEN; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4623 |  | 
| Joe Perches | 8e95a20 | 2009-12-03 07:58:21 +0000 | [diff] [blame] | 4624 | 		if (len > RX_COPY_THRESHOLD && | 
 | 4625 | 		    tp->rx_offset == NET_IP_ALIGN) { | 
 | 4626 | 		    /* rx_offset will likely not equal NET_IP_ALIGN | 
 | 4627 | 		     * if this is a 5701 card running in PCI-X mode | 
 | 4628 | 		     * [see tg3_get_invariants()] | 
 | 4629 | 		     */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4630 | 			int skb_size; | 
 | 4631 |  | 
| Matt Carlson | 86b21e5 | 2009-11-13 13:03:45 +0000 | [diff] [blame] | 4632 | 			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, | 
| Matt Carlson | afc081f | 2009-11-13 13:03:43 +0000 | [diff] [blame] | 4633 | 						    *post_ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4634 | 			if (skb_size < 0) | 
 | 4635 | 				goto drop_it; | 
 | 4636 |  | 
| Matt Carlson | afc081f | 2009-11-13 13:03:43 +0000 | [diff] [blame] | 4637 | 			ri->skb = NULL; | 
 | 4638 |  | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 4639 | 			pci_unmap_single(tp->pdev, dma_addr, skb_size, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4640 | 					 PCI_DMA_FROMDEVICE); | 
 | 4641 |  | 
 | 4642 | 			skb_put(skb, len); | 
 | 4643 | 		} else { | 
 | 4644 | 			struct sk_buff *copy_skb; | 
 | 4645 |  | 
| Matt Carlson | a389616 | 2009-11-13 13:03:44 +0000 | [diff] [blame] | 4646 | 			tg3_recycle_rx(tnapi, tpr, opaque_key, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4647 | 				       desc_idx, *post_ptr); | 
 | 4648 |  | 
| Matt Carlson | ad82926 | 2008-11-21 17:16:16 -0800 | [diff] [blame] | 4649 | 			copy_skb = netdev_alloc_skb(tp->dev, | 
 | 4650 | 						    len + TG3_RAW_IP_ALIGN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4651 | 			if (copy_skb == NULL) | 
 | 4652 | 				goto drop_it_no_recycle; | 
 | 4653 |  | 
| Matt Carlson | ad82926 | 2008-11-21 17:16:16 -0800 | [diff] [blame] | 4654 | 			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4655 | 			skb_put(copy_skb, len); | 
 | 4656 | 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 
| Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 4657 | 			skb_copy_from_linear_data(skb, copy_skb->data, len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4658 | 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 
 | 4659 |  | 
 | 4660 | 			/* We'll reuse the original ring buffer. */ | 
 | 4661 | 			skb = copy_skb; | 
 | 4662 | 		} | 
 | 4663 |  | 
 | 4664 | 		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) && | 
 | 4665 | 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && | 
 | 4666 | 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) | 
 | 4667 | 		      >> RXD_TCPCSUM_SHIFT) == 0xffff)) | 
 | 4668 | 			skb->ip_summed = CHECKSUM_UNNECESSARY; | 
 | 4669 | 		else | 
 | 4670 | 			skb->ip_summed = CHECKSUM_NONE; | 
 | 4671 |  | 
 | 4672 | 		skb->protocol = eth_type_trans(skb, tp->dev); | 
| Matt Carlson | f7b493e | 2009-02-25 14:21:52 +0000 | [diff] [blame] | 4673 |  | 
 | 4674 | 		if (len > (tp->dev->mtu + ETH_HLEN) && | 
 | 4675 | 		    skb->protocol != htons(ETH_P_8021Q)) { | 
 | 4676 | 			dev_kfree_skb(skb); | 
 | 4677 | 			goto next_pkt; | 
 | 4678 | 		} | 
 | 4679 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4680 | #if TG3_VLAN_TAG_USED | 
 | 4681 | 		if (tp->vlgrp != NULL && | 
 | 4682 | 		    desc->type_flags & RXD_FLAG_VLAN) { | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4683 | 			vlan_gro_receive(&tnapi->napi, tp->vlgrp, | 
| Matt Carlson | 8ef0442 | 2009-08-28 14:01:37 +0000 | [diff] [blame] | 4684 | 					 desc->err_vlan & RXD_VLAN_MASK, skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4685 | 		} else | 
 | 4686 | #endif | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4687 | 			napi_gro_receive(&tnapi->napi, skb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4688 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4689 | 		received++; | 
 | 4690 | 		budget--; | 
 | 4691 |  | 
 | 4692 | next_pkt: | 
 | 4693 | 		(*post_ptr)++; | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 4694 |  | 
 | 4695 | 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { | 
 | 4696 | 			u32 idx = *post_ptr % TG3_RX_RING_SIZE; | 
| Matt Carlson | 66711e6 | 2009-11-13 13:03:49 +0000 | [diff] [blame] | 4697 | 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx); | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 4698 | 			work_mask &= ~RXD_OPAQUE_RING_STD; | 
 | 4699 | 			rx_std_posted = 0; | 
 | 4700 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4701 | next_pkt_nopost: | 
| Michael Chan | 483ba50 | 2005-04-25 15:14:03 -0700 | [diff] [blame] | 4702 | 		sw_idx++; | 
| Eric Dumazet | 6b31a51 | 2007-02-06 13:29:21 -0800 | [diff] [blame] | 4703 | 		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); | 
| Michael Chan | 52f6d69 | 2005-04-25 15:14:32 -0700 | [diff] [blame] | 4704 |  | 
 | 4705 | 		/* Refresh hw_idx to see if there is new work */ | 
 | 4706 | 		if (sw_idx == hw_idx) { | 
| Matt Carlson | 8d9d7cf | 2009-09-01 13:19:05 +0000 | [diff] [blame] | 4707 | 			hw_idx = *(tnapi->rx_rcb_prod_idx); | 
| Michael Chan | 52f6d69 | 2005-04-25 15:14:32 -0700 | [diff] [blame] | 4708 | 			rmb(); | 
 | 4709 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4710 | 	} | 
 | 4711 |  | 
 | 4712 | 	/* ACK the status ring. */ | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 4713 | 	tnapi->rx_rcb_ptr = sw_idx; | 
 | 4714 | 	tw32_rx_mbox(tnapi->consmbox, sw_idx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4715 |  | 
 | 4716 | 	/* Refill RX ring(s). */ | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4717 | 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) { | 
 | 4718 | 		if (work_mask & RXD_OPAQUE_RING_STD) { | 
 | 4719 | 			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | 
 | 4720 | 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, | 
 | 4721 | 				     tpr->rx_std_prod_idx); | 
 | 4722 | 		} | 
 | 4723 | 		if (work_mask & RXD_OPAQUE_RING_JUMBO) { | 
 | 4724 | 			tpr->rx_jmb_prod_idx = jmb_prod_idx % | 
 | 4725 | 					       TG3_RX_JUMBO_RING_SIZE; | 
 | 4726 | 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, | 
 | 4727 | 				     tpr->rx_jmb_prod_idx); | 
 | 4728 | 		} | 
 | 4729 | 		mmiowb(); | 
 | 4730 | 	} else if (work_mask) { | 
 | 4731 | 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be | 
 | 4732 | 		 * updated before the producer indices can be updated. | 
 | 4733 | 		 */ | 
 | 4734 | 		smp_wmb(); | 
 | 4735 |  | 
| Matt Carlson | 4361935 | 2009-11-13 13:03:47 +0000 | [diff] [blame] | 4736 | 		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | 
| Matt Carlson | 4361935 | 2009-11-13 13:03:47 +0000 | [diff] [blame] | 4737 | 		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4738 |  | 
 | 4739 | 		napi_schedule(&tp->napi[1].napi); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4740 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4741 |  | 
 | 4742 | 	return received; | 
 | 4743 | } | 
 | 4744 |  | 
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4745 | static void tg3_poll_link(struct tg3 *tp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4746 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4747 | 	/* handle link change and other phy events */ | 
 | 4748 | 	if (!(tp->tg3_flags & | 
 | 4749 | 	      (TG3_FLAG_USE_LINKCHG_REG | | 
 | 4750 | 	       TG3_FLAG_POLL_SERDES))) { | 
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4751 | 		struct tg3_hw_status *sblk = tp->napi[0].hw_status; | 
 | 4752 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4753 | 		if (sblk->status & SD_STATUS_LINK_CHG) { | 
 | 4754 | 			sblk->status = SD_STATUS_UPDATED | | 
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4755 | 				       (sblk->status & ~SD_STATUS_LINK_CHG); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 4756 | 			spin_lock(&tp->lock); | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 4757 | 			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
 | 4758 | 				tw32_f(MAC_STATUS, | 
 | 4759 | 				     (MAC_STATUS_SYNC_CHANGED | | 
 | 4760 | 				      MAC_STATUS_CFG_CHANGED | | 
 | 4761 | 				      MAC_STATUS_MI_COMPLETION | | 
 | 4762 | 				      MAC_STATUS_LNKSTATE_CHANGED)); | 
 | 4763 | 				udelay(40); | 
 | 4764 | 			} else | 
 | 4765 | 				tg3_setup_phy(tp, 0); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 4766 | 			spin_unlock(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4767 | 		} | 
 | 4768 | 	} | 
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4769 | } | 
 | 4770 |  | 
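 |  | /* Copy freshly replenished standard and jumbo ring buffers from the | 
 |  |  * source producer ring set (spr) to the destination set (dpr): the | 
 |  |  * ring_info entries and descriptor addresses move over, spr's consumer | 
 |  |  * indices advance, and dpr's producer indices advance.  On RSS-capable | 
 |  |  * chips this lets tg3_poll_work() funnel buffers posted by the other | 
 |  |  * RX vectors into the one ring set that is handed to the hardware. | 
 |  |  */ | 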
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4771 | static void tg3_rx_prodring_xfer(struct tg3 *tp, | 
 | 4772 | 				 struct tg3_rx_prodring_set *dpr, | 
 | 4773 | 				 struct tg3_rx_prodring_set *spr) | 
 | 4774 | { | 
 | 4775 | 	u32 si, di, cpycnt, src_prod_idx; | 
 | 4776 | 	int i; | 
 | 4777 |  | 
 | 4778 | 	while (1) { | 
 | 4779 | 		src_prod_idx = spr->rx_std_prod_idx; | 
 | 4780 |  | 
 | 4781 | 		/* Make sure updates to the rx_std_buffers[] entries and the | 
 | 4782 | 		 * standard producer index are seen in the correct order. | 
 | 4783 | 		 */ | 
 | 4784 | 		smp_rmb(); | 
 | 4785 |  | 
 | 4786 | 		if (spr->rx_std_cons_idx == src_prod_idx) | 
 | 4787 | 			break; | 
 | 4788 |  | 
 | 4789 | 		if (spr->rx_std_cons_idx < src_prod_idx) | 
 | 4790 | 			cpycnt = src_prod_idx - spr->rx_std_cons_idx; | 
 | 4791 | 		else | 
 | 4792 | 			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; | 
 | 4793 |  | 
 | 4794 | 		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); | 
 | 4795 |  | 
 | 4796 | 		si = spr->rx_std_cons_idx; | 
 | 4797 | 		di = dpr->rx_std_prod_idx; | 
 | 4798 |  | 
 | 4799 | 		memcpy(&dpr->rx_std_buffers[di], | 
 | 4800 | 		       &spr->rx_std_buffers[si], | 
 | 4801 | 		       cpycnt * sizeof(struct ring_info)); | 
 | 4802 |  | 
 | 4803 | 		for (i = 0; i < cpycnt; i++, di++, si++) { | 
 | 4804 | 			struct tg3_rx_buffer_desc *sbd, *dbd; | 
 | 4805 | 			sbd = &spr->rx_std[si]; | 
 | 4806 | 			dbd = &dpr->rx_std[di]; | 
 | 4807 | 			dbd->addr_hi = sbd->addr_hi; | 
 | 4808 | 			dbd->addr_lo = sbd->addr_lo; | 
 | 4809 | 		} | 
 | 4810 |  | 
 | 4811 | 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % | 
 | 4812 | 				       TG3_RX_RING_SIZE; | 
 | 4813 | 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % | 
 | 4814 | 				       TG3_RX_RING_SIZE; | 
 | 4815 | 	} | 
 | 4816 |  | 
 | 4817 | 	while (1) { | 
 | 4818 | 		src_prod_idx = spr->rx_jmb_prod_idx; | 
 | 4819 |  | 
 | 4820 | 		/* Make sure updates to the rx_jmb_buffers[] entries and | 
 | 4821 | 		 * the jumbo producer index are seen in the correct order. | 
 | 4822 | 		 */ | 
 | 4823 | 		smp_rmb(); | 
 | 4824 |  | 
 | 4825 | 		if (spr->rx_jmb_cons_idx == src_prod_idx) | 
 | 4826 | 			break; | 
 | 4827 |  | 
 | 4828 | 		if (spr->rx_jmb_cons_idx < src_prod_idx) | 
 | 4829 | 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; | 
 | 4830 | 		else | 
 | 4831 | 			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; | 
 | 4832 |  | 
 | 4833 | 		cpycnt = min(cpycnt, | 
 | 4834 | 			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); | 
 | 4835 |  | 
 | 4836 | 		si = spr->rx_jmb_cons_idx; | 
 | 4837 | 		di = dpr->rx_jmb_prod_idx; | 
 | 4838 |  | 
 | 4839 | 		memcpy(&dpr->rx_jmb_buffers[di], | 
 | 4840 | 		       &spr->rx_jmb_buffers[si], | 
 | 4841 | 		       cpycnt * sizeof(struct ring_info)); | 
 | 4842 |  | 
 | 4843 | 		for (i = 0; i < cpycnt; i++, di++, si++) { | 
 | 4844 | 			struct tg3_rx_buffer_desc *sbd, *dbd; | 
 | 4845 | 			sbd = &spr->rx_jmb[si].std; | 
 | 4846 | 			dbd = &dpr->rx_jmb[di].std; | 
 | 4847 | 			dbd->addr_hi = sbd->addr_hi; | 
 | 4848 | 			dbd->addr_lo = sbd->addr_lo; | 
 | 4849 | 		} | 
 | 4850 |  | 
 | 4851 | 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % | 
 | 4852 | 				       TG3_RX_JUMBO_RING_SIZE; | 
 | 4853 | 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % | 
 | 4854 | 				       TG3_RX_JUMBO_RING_SIZE; | 
 | 4855 | 	} | 
 | 4856 | } | 
 | 4857 |  | 
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4858 | static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | 
 | 4859 | { | 
 | 4860 | 	struct tg3 *tp = tnapi->tp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4861 |  | 
 | 4862 | 	/* run TX completion thread */ | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 4863 | 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4864 | 		tg3_tx(tnapi); | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4865 | 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 
| Michael Chan | 4fd7ab5 | 2007-10-12 01:39:50 -0700 | [diff] [blame] | 4866 | 			return work_done; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4867 | 	} | 
 | 4868 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4869 | 	/* run RX thread, within the bounds set by NAPI. | 
 | 4870 | 	 * All RX "locking" is done by ensuring outside | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 4871 | 	 * code synchronizes with tg3->napi.poll() | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4872 | 	 */ | 
| Matt Carlson | 8d9d7cf | 2009-09-01 13:19:05 +0000 | [diff] [blame] | 4873 | 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4874 | 		work_done += tg3_rx(tnapi, budget - work_done); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4875 |  | 
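 |  | 	/* With RSS enabled, only the first RX vector (napi[1]) writes the | 
 |  | 	 * RX producer mailboxes.  Gather the buffers the other vectors have | 
 |  | 	 * replenished into prodring[0] and, if anything moved, post the new | 
 |  | 	 * standard/jumbo producer indices to the chip. | 
 |  | 	 */ | 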
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 4876 | 	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { | 
 | 4877 | 		int i; | 
 | 4878 | 		u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx; | 
 | 4879 | 		u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx; | 
 | 4880 |  | 
 | 4881 | 		for (i = 2; i < tp->irq_cnt; i++) | 
 | 4882 | 			tg3_rx_prodring_xfer(tp, tnapi->prodring, | 
 | 4883 | 					     tp->napi[i].prodring); | 
 | 4884 |  | 
 | 4885 | 		wmb(); | 
 | 4886 |  | 
 | 4887 | 		if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) { | 
 | 4888 | 			u32 mbox = TG3_RX_STD_PROD_IDX_REG; | 
 | 4889 | 			tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx); | 
 | 4890 | 		} | 
 | 4891 |  | 
 | 4892 | 		if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) { | 
 | 4893 | 			u32 mbox = TG3_RX_JMB_PROD_IDX_REG; | 
 | 4894 | 			tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx); | 
 | 4895 | 		} | 
 | 4896 |  | 
 | 4897 | 		mmiowb(); | 
 | 4898 | 	} | 
 | 4899 |  | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4900 | 	return work_done; | 
 | 4901 | } | 
| David S. Miller | f7383c2 | 2005-05-18 22:50:53 -0700 | [diff] [blame] | 4902 |  | 
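 |  | /* MSI-X variant of the NAPI poll loop.  Unlike tg3_poll() below, it | 
 |  |  * never polls the link state and always runs in tagged-status mode, | 
 |  |  * re-enabling its vector's interrupt through tnapi->int_mbox once the | 
 |  |  * ring work is done. | 
 |  |  */ | 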
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4903 | static int tg3_poll_msix(struct napi_struct *napi, int budget) | 
 | 4904 | { | 
 | 4905 | 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); | 
 | 4906 | 	struct tg3 *tp = tnapi->tp; | 
 | 4907 | 	int work_done = 0; | 
 | 4908 | 	struct tg3_hw_status *sblk = tnapi->hw_status; | 
 | 4909 |  | 
 | 4910 | 	while (1) { | 
 | 4911 | 		work_done = tg3_poll_work(tnapi, work_done, budget); | 
 | 4912 |  | 
 | 4913 | 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 
 | 4914 | 			goto tx_recovery; | 
 | 4915 |  | 
 | 4916 | 		if (unlikely(work_done >= budget)) | 
 | 4917 | 			break; | 
 | 4918 |  | 
 | 4919 | 		/* tnapi->last_tag is used in tg3_restart_ints() below | 
 | 4920 | 		 * to tell the hw how much work has been processed, | 
 | 4921 | 		 * so we must read it before checking for more work. | 
 | 4922 | 		 */ | 
 | 4923 | 		tnapi->last_tag = sblk->status_tag; | 
 | 4924 | 		tnapi->last_irq_tag = tnapi->last_tag; | 
 | 4925 | 		rmb(); | 
 | 4926 |  | 
 | 4927 | 		/* check for RX/TX work to do */ | 
 | 4928 | 		if (sblk->idx[0].tx_consumer == tnapi->tx_cons && | 
 | 4929 | 		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { | 
 | 4930 | 			napi_complete(napi); | 
 | 4931 | 			/* Reenable interrupts. */ | 
 | 4932 | 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); | 
 | 4933 | 			mmiowb(); | 
 | 4934 | 			break; | 
 | 4935 | 		} | 
 | 4936 | 	} | 
 | 4937 |  | 
 | 4938 | 	return work_done; | 
 | 4939 |  | 
 | 4940 | tx_recovery: | 
 | 4941 | 	/* work_done is guaranteed to be less than budget. */ | 
 | 4942 | 	napi_complete(napi); | 
 | 4943 | 	schedule_work(&tp->reset_task); | 
 | 4944 | 	return work_done; | 
 | 4945 | } | 
 | 4946 |  | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4947 | static int tg3_poll(struct napi_struct *napi, int budget) | 
 | 4948 | { | 
| Matt Carlson | 8ef0442 | 2009-08-28 14:01:37 +0000 | [diff] [blame] | 4949 | 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); | 
 | 4950 | 	struct tg3 *tp = tnapi->tp; | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4951 | 	int work_done = 0; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 4952 | 	struct tg3_hw_status *sblk = tnapi->hw_status; | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4953 |  | 
 | 4954 | 	while (1) { | 
| Matt Carlson | 35f2d7d | 2009-11-13 13:03:41 +0000 | [diff] [blame] | 4955 | 		tg3_poll_link(tp); | 
 | 4956 |  | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4957 | 		work_done = tg3_poll_work(tnapi, work_done, budget); | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4958 |  | 
 | 4959 | 		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 
 | 4960 | 			goto tx_recovery; | 
 | 4961 |  | 
 | 4962 | 		if (unlikely(work_done >= budget)) | 
 | 4963 | 			break; | 
 | 4964 |  | 
| Michael Chan | 4fd7ab5 | 2007-10-12 01:39:50 -0700 | [diff] [blame] | 4965 | 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4966 | 			/* tnapi->last_tag is used in tg3_int_reenable() below | 
| Michael Chan | 4fd7ab5 | 2007-10-12 01:39:50 -0700 | [diff] [blame] | 4967 | 			 * to tell the hw how much work has been processed, | 
 | 4968 | 			 * so we must read it before checking for more work. | 
 | 4969 | 			 */ | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 4970 | 			tnapi->last_tag = sblk->status_tag; | 
 | 4971 | 			tnapi->last_irq_tag = tnapi->last_tag; | 
| Michael Chan | 4fd7ab5 | 2007-10-12 01:39:50 -0700 | [diff] [blame] | 4972 | 			rmb(); | 
 | 4973 | 		} else | 
 | 4974 | 			sblk->status &= ~SD_STATUS_UPDATED; | 
 | 4975 |  | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4976 | 		if (likely(!tg3_has_work(tnapi))) { | 
| Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 4977 | 			napi_complete(napi); | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 4978 | 			tg3_int_reenable(tnapi); | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4979 | 			break; | 
 | 4980 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4981 | 	} | 
 | 4982 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 4983 | 	return work_done; | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4984 |  | 
 | 4985 | tx_recovery: | 
| Michael Chan | 4fd7ab5 | 2007-10-12 01:39:50 -0700 | [diff] [blame] | 4986 | 	/* work_done is guaranteed to be less than budget. */ | 
| Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 4987 | 	napi_complete(napi); | 
| David S. Miller | 6f53576 | 2007-10-11 18:08:29 -0700 | [diff] [blame] | 4988 | 	schedule_work(&tp->reset_task); | 
| Michael Chan | 4fd7ab5 | 2007-10-12 01:39:50 -0700 | [diff] [blame] | 4989 | 	return work_done; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4990 | } | 
 | 4991 |  | 
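 |  | /* Mark the device as quiescing and wait for any interrupt handler that | 
 |  |  * is already running on any vector to finish.  Handlers test | 
 |  |  * tg3_irq_sync() afterwards and bail out without scheduling NAPI, so no | 
 |  |  * new work is started while the full lock is held with irq_sync set. | 
 |  |  */ | 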
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 4992 | static void tg3_irq_quiesce(struct tg3 *tp) | 
 | 4993 | { | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 4994 | 	int i; | 
 | 4995 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 4996 | 	BUG_ON(tp->irq_sync); | 
 | 4997 |  | 
 | 4998 | 	tp->irq_sync = 1; | 
 | 4999 | 	smp_mb(); | 
 | 5000 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 5001 | 	for (i = 0; i < tp->irq_cnt; i++) | 
 | 5002 | 		synchronize_irq(tp->napi[i].irq_vec); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5003 | } | 
 | 5004 |  | 
 | 5005 | static inline int tg3_irq_sync(struct tg3 *tp) | 
 | 5006 | { | 
 | 5007 | 	return tp->irq_sync; | 
 | 5008 | } | 
 | 5009 |  | 
 | 5010 | /* Fully shut down all tg3 driver activity elsewhere in the system. | 
 | 5011 |  * If irq_sync is non-zero, then the IRQ handlers must be synchronized | 
 | 5012 |  * with as well.  Most of the time this is not necessary, except when | 
 | 5013 |  * shutting down the device. | 
 | 5014 |  */ | 
 | 5015 | static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) | 
 | 5016 | { | 
| Michael Chan | 4696654 | 2007-07-11 19:47:19 -0700 | [diff] [blame] | 5017 | 	spin_lock_bh(&tp->lock); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5018 | 	if (irq_sync) | 
 | 5019 | 		tg3_irq_quiesce(tp); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5020 | } | 
 | 5021 |  | 
 | 5022 | static inline void tg3_full_unlock(struct tg3 *tp) | 
 | 5023 | { | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5024 | 	spin_unlock_bh(&tp->lock); | 
 | 5025 | } | 
 | 5026 |  | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 5027 | /* One-shot MSI handler - the chip automatically disables the interrupt | 
 | 5028 |  * after sending the MSI, so the driver doesn't have to do it. | 
 | 5029 |  */ | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 5030 | static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 5031 | { | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5032 | 	struct tg3_napi *tnapi = dev_id; | 
 | 5033 | 	struct tg3 *tp = tnapi->tp; | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 5034 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5035 | 	prefetch(tnapi->hw_status); | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 5036 | 	if (tnapi->rx_rcb) | 
 | 5037 | 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 5038 |  | 
 | 5039 | 	if (likely(!tg3_irq_sync(tp))) | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5040 | 		napi_schedule(&tnapi->napi); | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 5041 |  | 
 | 5042 | 	return IRQ_HANDLED; | 
 | 5043 | } | 
 | 5044 |  | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5045 | /* MSI ISR - No need to check for interrupt sharing and no need to | 
 | 5046 |  * flush status block and interrupt mailbox. PCI ordering rules | 
 | 5047 |  * guarantee that MSI will arrive after the status block. | 
 | 5048 |  */ | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 5049 | static irqreturn_t tg3_msi(int irq, void *dev_id) | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5050 | { | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5051 | 	struct tg3_napi *tnapi = dev_id; | 
 | 5052 | 	struct tg3 *tp = tnapi->tp; | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5053 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5054 | 	prefetch(tnapi->hw_status); | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 5055 | 	if (tnapi->rx_rcb) | 
 | 5056 | 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5057 | 	/* | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5058 | 	 * Writing any value to intr-mbox-0 clears PCI INTA# and | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5059 | 	 * chip-internal interrupt pending events. | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5060 | 	 * Writing non-zero to intr-mbox-0 additionally tells the | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5061 | 	 * NIC to stop sending us irqs, engaging "in-intr-handler" | 
 | 5062 | 	 * event coalescing. | 
 | 5063 | 	 */ | 
 | 5064 | 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 
| Michael Chan | 6148748 | 2005-09-05 17:53:19 -0700 | [diff] [blame] | 5065 | 	if (likely(!tg3_irq_sync(tp))) | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5066 | 		napi_schedule(&tnapi->napi); | 
| Michael Chan | 6148748 | 2005-09-05 17:53:19 -0700 | [diff] [blame] | 5067 |  | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5068 | 	return IRQ_RETVAL(1); | 
 | 5069 | } | 
 | 5070 |  | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 5071 | static irqreturn_t tg3_interrupt(int irq, void *dev_id) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5072 | { | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5073 | 	struct tg3_napi *tnapi = dev_id; | 
 | 5074 | 	struct tg3 *tp = tnapi->tp; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5075 | 	struct tg3_hw_status *sblk = tnapi->hw_status; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5076 | 	unsigned int handled = 1; | 
 | 5077 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5078 | 	/* In INTx mode, it is possible for the interrupt to arrive at | 
 | 5079 | 	 * the CPU before the status block, posted prior to the interrupt, is visible. | 
 | 5080 | 	 * Reading the PCI State register will confirm whether the | 
 | 5081 | 	 * interrupt is ours and will flush the status block. | 
 | 5082 | 	 */ | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5083 | 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { | 
 | 5084 | 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || | 
 | 5085 | 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 
 | 5086 | 			handled = 0; | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5087 | 			goto out; | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5088 | 		} | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5089 | 	} | 
 | 5090 |  | 
 | 5091 | 	/* | 
 | 5092 | 	 * Writing any value to intr-mbox-0 clears PCI INTA# and | 
 | 5093 | 	 * chip-internal interrupt pending events. | 
 | 5094 | 	 * Writing non-zero to intr-mbox-0 additionally tells the | 
 | 5095 | 	 * NIC to stop sending us irqs, engaging "in-intr-handler" | 
 | 5096 | 	 * event coalescing. | 
| Michael Chan | c04cb34 | 2007-05-07 00:26:15 -0700 | [diff] [blame] | 5097 | 	 * | 
 | 5098 | 	 * Flush the mailbox to de-assert the IRQ immediately to prevent | 
 | 5099 | 	 * spurious interrupts.  The flush impacts performance but | 
 | 5100 | 	 * excessive spurious interrupts can be worse in some cases. | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5101 | 	 */ | 
| Michael Chan | c04cb34 | 2007-05-07 00:26:15 -0700 | [diff] [blame] | 5102 | 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5103 | 	if (tg3_irq_sync(tp)) | 
 | 5104 | 		goto out; | 
 | 5105 | 	sblk->status &= ~SD_STATUS_UPDATED; | 
| Matt Carlson | 17375d2 | 2009-08-28 14:02:18 +0000 | [diff] [blame] | 5106 | 	if (likely(tg3_has_work(tnapi))) { | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 5107 | 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5108 | 		napi_schedule(&tnapi->napi); | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5109 | 	} else { | 
 | 5110 | 		/* No work, shared interrupt perhaps?  re-enable | 
 | 5111 | 		 * interrupts, and flush that PCI write | 
 | 5112 | 		 */ | 
 | 5113 | 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 
 | 5114 | 			       0x00000000); | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5115 | 	} | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5116 | out: | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5117 | 	return IRQ_RETVAL(handled); | 
 | 5118 | } | 
 | 5119 |  | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 5120 | static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5121 | { | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5122 | 	struct tg3_napi *tnapi = dev_id; | 
 | 5123 | 	struct tg3 *tp = tnapi->tp; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5124 | 	struct tg3_hw_status *sblk = tnapi->hw_status; | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5125 | 	unsigned int handled = 1; | 
 | 5126 |  | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 5127 | 	/* In INTx mode, it is possible for the interrupt to arrive at | 
 | 5128 | 	 * the CPU before the status block, posted prior to the interrupt, is visible. | 
 | 5129 | 	 * Reading the PCI State register will confirm whether the | 
 | 5130 | 	 * interrupt is ours and will flush the status block. | 
 | 5131 | 	 */ | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5132 | 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5133 | 		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) || | 
 | 5134 | 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 
 | 5135 | 			handled = 0; | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5136 | 			goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5137 | 		} | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5138 | 	} | 
 | 5139 |  | 
 | 5140 | 	/* | 
 | 5141 | 	 * Writing any value to intr-mbox-0 clears PCI INTA# and | 
 | 5142 | 	 * chip-internal interrupt pending events. | 
 | 5143 | 	 * Writing non-zero to intr-mbox-0 additionally tells the | 
 | 5144 | 	 * NIC to stop sending us irqs, engaging "in-intr-handler" | 
 | 5145 | 	 * event coalescing. | 
| Michael Chan | c04cb34 | 2007-05-07 00:26:15 -0700 | [diff] [blame] | 5146 | 	 * | 
 | 5147 | 	 * Flush the mailbox to de-assert the IRQ immediately to prevent | 
 | 5148 | 	 * spurious interrupts.  The flush impacts performance but | 
 | 5149 | 	 * excessive spurious interrupts can be worse in some cases. | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5150 | 	 */ | 
| Michael Chan | c04cb34 | 2007-05-07 00:26:15 -0700 | [diff] [blame] | 5151 | 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); | 
| Matt Carlson | 624f8e5 | 2009-04-20 06:55:01 +0000 | [diff] [blame] | 5152 |  | 
 | 5153 | 	/* | 
 | 5154 | 	 * In a shared interrupt configuration, sometimes other devices' | 
 | 5155 | 	 * interrupts will scream.  We record the current status tag here | 
 | 5156 | 	 * so that the above check can report that the screaming interrupts | 
 | 5157 | 	 * are unhandled.  Eventually they will be silenced. | 
 | 5158 | 	 */ | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5159 | 	tnapi->last_irq_tag = sblk->status_tag; | 
| Matt Carlson | 624f8e5 | 2009-04-20 06:55:01 +0000 | [diff] [blame] | 5160 |  | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 5161 | 	if (tg3_irq_sync(tp)) | 
 | 5162 | 		goto out; | 
| Matt Carlson | 624f8e5 | 2009-04-20 06:55:01 +0000 | [diff] [blame] | 5163 |  | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 5164 | 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); | 
| Matt Carlson | 624f8e5 | 2009-04-20 06:55:01 +0000 | [diff] [blame] | 5165 |  | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5166 | 	napi_schedule(&tnapi->napi); | 
| Matt Carlson | 624f8e5 | 2009-04-20 06:55:01 +0000 | [diff] [blame] | 5167 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5168 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5169 | 	return IRQ_RETVAL(handled); | 
 | 5170 | } | 
 | 5171 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 5172 | /* ISR for interrupt test */ | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 5173 | static irqreturn_t tg3_test_isr(int irq, void *dev_id) | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 5174 | { | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 5175 | 	struct tg3_napi *tnapi = dev_id; | 
 | 5176 | 	struct tg3 *tp = tnapi->tp; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 5177 | 	struct tg3_hw_status *sblk = tnapi->hw_status; | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 5178 |  | 
| Michael Chan | f9804dd | 2005-09-27 12:13:10 -0700 | [diff] [blame] | 5179 | 	if ((sblk->status & SD_STATUS_UPDATED) || | 
 | 5180 | 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 5181 | 		tg3_disable_ints(tp); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 5182 | 		return IRQ_RETVAL(1); | 
 | 5183 | 	} | 
 | 5184 | 	return IRQ_RETVAL(0); | 
 | 5185 | } | 
 | 5186 |  | 
| Gary Zambrano | 8e7a22e | 2006-04-29 18:59:13 -0700 | [diff] [blame] | 5187 | static int tg3_init_hw(struct tg3 *, int); | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 5188 | static int tg3_halt(struct tg3 *, int, int); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5189 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5190 | /* Restart hardware after configuration changes, self-test, etc. | 
 | 5191 |  * Invoked with tp->lock held. | 
 | 5192 |  */ | 
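 |  | /* Note that the error path drops tp->lock around dev_close() and then | 
 |  |  * re-acquires it, which is what the __releases()/__acquires() sparse | 
 |  |  * annotations below document. | 
 |  |  */ | 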
 | 5193 | static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | 
| Eric Dumazet | 78c6146 | 2008-04-24 23:33:06 -0700 | [diff] [blame] | 5194 | 	__releases(tp->lock) | 
 | 5195 | 	__acquires(tp->lock) | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5196 | { | 
 | 5197 | 	int err; | 
 | 5198 |  | 
 | 5199 | 	err = tg3_init_hw(tp, reset_phy); | 
 | 5200 | 	if (err) { | 
 | 5201 | 		printk(KERN_ERR PFX "%s: Failed to re-initialize device, " | 
 | 5202 | 		       "aborting.\n", tp->dev->name); | 
 | 5203 | 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
 | 5204 | 		tg3_full_unlock(tp); | 
 | 5205 | 		del_timer_sync(&tp->timer); | 
 | 5206 | 		tp->irq_sync = 0; | 
| Matt Carlson | fed9781 | 2009-09-01 13:10:19 +0000 | [diff] [blame] | 5207 | 		tg3_napi_enable(tp); | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5208 | 		dev_close(tp->dev); | 
 | 5209 | 		tg3_full_lock(tp, 0); | 
 | 5210 | 	} | 
 | 5211 | 	return err; | 
 | 5212 | } | 
 | 5213 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5214 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
 | 5215 | static void tg3_poll_controller(struct net_device *dev) | 
 | 5216 | { | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 5217 | 	int i; | 
| Michael Chan | 88b06bc | 2005-04-21 17:13:25 -0700 | [diff] [blame] | 5218 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 5219 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 5220 | 	for (i = 0; i < tp->irq_cnt; i++) | 
 | 5221 | 		tg3_interrupt(tp->napi[i].irq_vec, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5222 | } | 
 | 5223 | #endif | 
 | 5224 |  | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 5225 | static void tg3_reset_task(struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5226 | { | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 5227 | 	struct tg3 *tp = container_of(work, struct tg3, reset_task); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 5228 | 	int err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5229 | 	unsigned int restart_timer; | 
 | 5230 |  | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 5231 | 	tg3_full_lock(tp, 0); | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 5232 |  | 
 | 5233 | 	if (!netif_running(tp->dev)) { | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 5234 | 		tg3_full_unlock(tp); | 
 | 5235 | 		return; | 
 | 5236 | 	} | 
 | 5237 |  | 
 | 5238 | 	tg3_full_unlock(tp); | 
 | 5239 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 5240 | 	tg3_phy_stop(tp); | 
 | 5241 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5242 | 	tg3_netif_stop(tp); | 
 | 5243 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5244 | 	tg3_full_lock(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5245 |  | 
 | 5246 | 	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; | 
 | 5247 | 	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; | 
 | 5248 |  | 
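 |  | 	/* tg3_tx_recover() flagged suspected MMIO write re-ordering; switch | 
 |  | 	 * to mailbox write methods that flush their writes (note the | 
 |  | 	 * tg3_write_flush_reg32 accessor and the MBOX_WRITE_REORDER flag) | 
 |  | 	 * before re-initializing the chip. | 
 |  | 	 */ | 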
| Michael Chan | df3e654 | 2006-05-26 17:48:07 -0700 | [diff] [blame] | 5249 | 	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) { | 
 | 5250 | 		tp->write32_tx_mbox = tg3_write32_tx_mbox; | 
 | 5251 | 		tp->write32_rx_mbox = tg3_write_flush_reg32; | 
 | 5252 | 		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; | 
 | 5253 | 		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING; | 
 | 5254 | 	} | 
 | 5255 |  | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 5256 | 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 5257 | 	err = tg3_init_hw(tp, 1); | 
 | 5258 | 	if (err) | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5259 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5260 |  | 
 | 5261 | 	tg3_netif_start(tp); | 
 | 5262 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5263 | 	if (restart_timer) | 
 | 5264 | 		mod_timer(&tp->timer, jiffies + 1); | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 5265 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5266 | out: | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 5267 | 	tg3_full_unlock(tp); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 5268 |  | 
 | 5269 | 	if (!err) | 
 | 5270 | 		tg3_phy_start(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5271 | } | 
 | 5272 |  | 
| Michael Chan | b040875 | 2007-02-13 12:18:30 -0800 | [diff] [blame] | 5273 | static void tg3_dump_short_state(struct tg3 *tp) | 
 | 5274 | { | 
 | 5275 | 	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n", | 
 | 5276 | 	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS)); | 
 | 5277 | 	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n", | 
 | 5278 | 	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS)); | 
 | 5279 | } | 
 | 5280 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5281 | static void tg3_tx_timeout(struct net_device *dev) | 
 | 5282 | { | 
 | 5283 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 5284 |  | 
| Michael Chan | b040875 | 2007-02-13 12:18:30 -0800 | [diff] [blame] | 5285 | 	if (netif_msg_tx_err(tp)) { | 
| Michael Chan | 9f88f29 | 2006-12-07 00:22:54 -0800 | [diff] [blame] | 5286 | 		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", | 
 | 5287 | 		       dev->name); | 
| Michael Chan | b040875 | 2007-02-13 12:18:30 -0800 | [diff] [blame] | 5288 | 		tg3_dump_short_state(tp); | 
 | 5289 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5290 |  | 
 | 5291 | 	schedule_work(&tp->reset_task); | 
 | 5292 | } | 
 | 5293 |  | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5294 | /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ | 
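/* Example: base = 0xffffff00 and len = 0x200 give base + len + 8 =
 * 0x100000108, which wraps to 0x108 in 32-bit arithmetic and is less
 * than base, so the buffer straddles a 4GB boundary and the test fires.
 */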
 | 5295 | static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) | 
 | 5296 | { | 
 | 5297 | 	u32 base = (u32) mapping & 0xffffffff; | 
 | 5298 |  | 
 | 5299 | 	return ((base > 0xffffdcc0) && | 
 | 5300 | 		(base + len + 8 < base)); | 
 | 5301 | } | 
 | 5302 |  | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 5303 | /* Test for DMA addresses > 40-bit */ | 
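/* DMA_BIT_MASK(40) is 40 bits of ones (0xffffffffff), so the test fires
 * when the end of the mapped buffer would fall above the 40-bit
 * addressable range.  It is compiled in only for CONFIG_HIGHMEM on
 * 64-bit builds and applies only to chips flagged with
 * TG3_FLAG_40BIT_DMA_BUG.
 */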
 | 5304 | static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, | 
 | 5305 | 					  int len) | 
 | 5306 | { | 
 | 5307 | #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) | 
| Michael Chan | 6728a8e | 2006-03-27 23:16:49 -0800 | [diff] [blame] | 5308 | 	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) | 
| Yang Hongyang | 50cf156 | 2009-04-06 19:01:14 -0700 | [diff] [blame] | 5309 | 		return (((u64) mapping + len) > DMA_BIT_MASK(40)); | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 5310 | 	return 0; | 
 | 5311 | #else | 
 | 5312 | 	return 0; | 
 | 5313 | #endif | 
 | 5314 | } | 
 | 5315 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5316 | static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5317 |  | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 5318 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ | 
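/* The workaround copies the skb into a freshly allocated linear buffer
 * (with extra headroom on ASIC_REV_5701 so the data can be 4-byte
 * aligned), maps the copy, points a single tx descriptor at it and then
 * unmaps the original head and fragments from the sw ring.  If the
 * copy, the mapping or the 4G boundary check on the new buffer fails,
 * the packet is dropped and -1 is returned.
 */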
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5319 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | 
 | 5320 | 				       struct sk_buff *skb, u32 last_plus_one, | 
 | 5321 | 				       u32 *start, u32 base_flags, u32 mss) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5322 | { | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5323 | 	struct tg3 *tp = tnapi->tp; | 
| Matt Carlson | 41588ba | 2008-04-19 18:12:33 -0700 | [diff] [blame] | 5324 | 	struct sk_buff *new_skb; | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5325 | 	dma_addr_t new_addr = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5326 | 	u32 entry = *start; | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5327 | 	int i, ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5328 |  | 
| Matt Carlson | 41588ba | 2008-04-19 18:12:33 -0700 | [diff] [blame] | 5329 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 
 | 5330 | 		new_skb = skb_copy(skb, GFP_ATOMIC); | 
 | 5331 | 	else { | 
 | 5332 | 		int more_headroom = 4 - ((unsigned long)skb->data & 3); | 
 | 5333 |  | 
 | 5334 | 		new_skb = skb_copy_expand(skb, | 
 | 5335 | 					  skb_headroom(skb) + more_headroom, | 
 | 5336 | 					  skb_tailroom(skb), GFP_ATOMIC); | 
 | 5337 | 	} | 
 | 5338 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5339 | 	if (!new_skb) { | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5340 | 		ret = -1; | 
 | 5341 | 	} else { | 
 | 5342 | 		/* New SKB is guaranteed to be linear. */ | 
 | 5343 | 		entry = *start; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5344 | 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, | 
 | 5345 | 					  PCI_DMA_TODEVICE); | 
 | 5346 | 		/* Make sure the mapping succeeded */ | 
 | 5347 | 		if (pci_dma_mapping_error(tp->pdev, new_addr)) { | 
 | 5348 | 			ret = -1; | 
 | 5349 | 			dev_kfree_skb(new_skb); | 
 | 5350 | 			new_skb = NULL; | 
| David S. Miller | 90079ce | 2008-09-11 04:52:51 -0700 | [diff] [blame] | 5351 |  | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5352 | 		/* Make sure new skb does not cross any 4G boundaries. | 
 | 5353 | 		 * Drop the packet if it does. | 
 | 5354 | 		 */ | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5355 | 		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | 
 | 5356 | 			    tg3_4g_overflow_test(new_addr, new_skb->len)) { | 
 | 5357 | 			pci_unmap_single(tp->pdev, new_addr, new_skb->len, | 
 | 5358 | 					 PCI_DMA_TODEVICE); | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5359 | 			ret = -1; | 
 | 5360 | 			dev_kfree_skb(new_skb); | 
 | 5361 | 			new_skb = NULL; | 
 | 5362 | 		} else { | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5363 | 			tg3_set_txd(tnapi, entry, new_addr, new_skb->len, | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5364 | 				    base_flags, 1 | (mss << 1)); | 
 | 5365 | 			*start = NEXT_TX(entry); | 
 | 5366 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5367 | 	} | 
 | 5368 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5369 | 	/* Now clean up the sw ring entries. */ | 
 | 5370 | 	i = 0; | 
 | 5371 | 	while (entry != last_plus_one) { | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5372 | 		int len; | 
 | 5373 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5374 | 		if (i == 0) | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5375 | 			len = skb_headlen(skb); | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5376 | 		else | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5377 | 			len = skb_shinfo(skb)->frags[i-1].size; | 
 | 5378 |  | 
 | 5379 | 		pci_unmap_single(tp->pdev, | 
 | 5380 | 				 pci_unmap_addr(&tnapi->tx_buffers[entry], | 
 | 5381 | 						mapping), | 
 | 5382 | 				 len, PCI_DMA_TODEVICE); | 
 | 5383 | 		if (i == 0) { | 
 | 5384 | 			tnapi->tx_buffers[entry].skb = new_skb; | 
 | 5385 | 			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | 
 | 5386 | 					   new_addr); | 
 | 5387 | 		} else { | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5388 | 			tnapi->tx_buffers[entry].skb = NULL; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5389 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5390 | 		entry = NEXT_TX(entry); | 
 | 5391 | 		i++; | 
 | 5392 | 	} | 
 | 5393 |  | 
 | 5394 | 	dev_kfree_skb(skb); | 
 | 5395 |  | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5396 | 	return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5397 | } | 
 | 5398 |  | 
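/* Fill one hardware tx descriptor: the 64-bit DMA address is split into
 * addr_hi/addr_lo, the length and flag bits are packed into len_flags,
 * and the VLAN tag shares the vlan_tag word with the MSS.  The low bit
 * of mss_and_is_end marks the last descriptor of the packet and sets
 * TXD_FLAG_END.
 */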
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5399 | static void tg3_set_txd(struct tg3_napi *tnapi, int entry, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5400 | 			dma_addr_t mapping, int len, u32 flags, | 
 | 5401 | 			u32 mss_and_is_end) | 
 | 5402 | { | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5403 | 	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5404 | 	int is_end = (mss_and_is_end & 0x1); | 
 | 5405 | 	u32 mss = (mss_and_is_end >> 1); | 
 | 5406 | 	u32 vlan_tag = 0; | 
 | 5407 |  | 
 | 5408 | 	if (is_end) | 
 | 5409 | 		flags |= TXD_FLAG_END; | 
 | 5410 | 	if (flags & TXD_FLAG_VLAN) { | 
 | 5411 | 		vlan_tag = flags >> 16; | 
 | 5412 | 		flags &= 0xffff; | 
 | 5413 | 	} | 
 | 5414 | 	vlan_tag |= (mss << TXD_MSS_SHIFT); | 
 | 5415 |  | 
 | 5416 | 	txd->addr_hi = ((u64) mapping >> 32); | 
 | 5417 | 	txd->addr_lo = ((u64) mapping & 0xffffffff); | 
 | 5418 | 	txd->len_flags = (len << TXD_LEN_SHIFT) | flags; | 
 | 5419 | 	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; | 
 | 5420 | } | 
 | 5421 |  | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5422 | /* hard_start_xmit for devices that don't have any bugs and | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 5423 |  * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5424 |  */ | 
| Stephen Hemminger | 61357325 | 2009-08-31 19:50:58 +0000 | [diff] [blame] | 5425 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | 
 | 5426 | 				  struct net_device *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5427 | { | 
 | 5428 | 	struct tg3 *tp = netdev_priv(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5429 | 	u32 len, entry, base_flags, mss; | 
| David S. Miller | 90079ce | 2008-09-11 04:52:51 -0700 | [diff] [blame] | 5430 | 	dma_addr_t mapping; | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5431 | 	struct tg3_napi *tnapi; | 
 | 5432 | 	struct netdev_queue *txq; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5433 | 	unsigned int i, last; | 
 | 5434 |  | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5435 |  | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5436 | 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 
 | 5437 | 	tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 5438 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5439 | 		tnapi++; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5440 |  | 
| Michael Chan | 00b7050 | 2006-06-17 21:58:45 -0700 | [diff] [blame] | 5441 | 	/* We are running in BH disabled context with netif_tx_lock | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 5442 | 	 * and TX reclaim runs via tp->napi.poll inside of a software | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5443 | 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have | 
 | 5444 | 	 * no IRQ context deadlocks to worry about either.  Rejoice! | 
 | 5445 | 	 */ | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5446 | 	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5447 | 		if (!netif_tx_queue_stopped(txq)) { | 
 | 5448 | 			netif_tx_stop_queue(txq); | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5449 |  | 
 | 5450 | 			/* This is a hard error, log it. */ | 
 | 5451 | 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 
 | 5452 | 			       "queue awake!\n", dev->name); | 
 | 5453 | 		} | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5454 | 		return NETDEV_TX_BUSY; | 
 | 5455 | 	} | 
 | 5456 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5457 | 	entry = tnapi->tx_prod; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5458 | 	base_flags = 0; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5459 | 	mss = 0; | 
| Matt Carlson | c13e371 | 2007-05-05 11:50:04 -0700 | [diff] [blame] | 5460 | 	if ((mss = skb_shinfo(skb)->gso_size) != 0) { | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5461 | 		int tcp_opt_len, ip_tcp_len; | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 5462 | 		u32 hdrlen; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5463 |  | 
 | 5464 | 		if (skb_header_cloned(skb) && | 
 | 5465 | 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 
 | 5466 | 			dev_kfree_skb(skb); | 
 | 5467 | 			goto out_unlock; | 
 | 5468 | 		} | 
 | 5469 |  | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 5470 | 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 5471 | 			hdrlen = skb_headlen(skb) - ETH_HLEN; | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 5472 | 		else { | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5473 | 			struct iphdr *iph = ip_hdr(skb); | 
 | 5474 |  | 
| Arnaldo Carvalho de Melo | ab6a5bb | 2007-03-18 17:43:48 -0700 | [diff] [blame] | 5475 | 			tcp_opt_len = tcp_optlen(skb); | 
| Arnaldo Carvalho de Melo | c9bdd4b | 2007-03-12 20:09:15 -0300 | [diff] [blame] | 5476 | 			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 5477 |  | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5478 | 			iph->check = 0; | 
 | 5479 | 			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 5480 | 			hdrlen = ip_tcp_len + tcp_opt_len; | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 5481 | 		} | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5482 |  | 
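		/* HW_TSO_3 parts take the header length split between the
		 * mss field and base_flags; HW_TSO_2 parts take the whole
		 * header length shifted into the mss field.
		 */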
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 5483 | 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 5484 | 			mss |= (hdrlen & 0xc) << 12; | 
 | 5485 | 			if (hdrlen & 0x10) | 
 | 5486 | 				base_flags |= 0x00000010; | 
 | 5487 | 			base_flags |= (hdrlen & 0x3e0) << 5; | 
 | 5488 | 		} else | 
 | 5489 | 			mss |= hdrlen << 9; | 
 | 5490 |  | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5491 | 		base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 
 | 5492 | 			       TXD_FLAG_CPU_POST_DMA); | 
 | 5493 |  | 
| Arnaldo Carvalho de Melo | aa8223c | 2007-04-10 21:04:22 -0700 | [diff] [blame] | 5494 | 		tcp_hdr(skb)->check = 0; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5495 |  | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5496 | 	} | 
| Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 5497 | 	else if (skb->ip_summed == CHECKSUM_PARTIAL) | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5498 | 		base_flags |= TXD_FLAG_TCPUDP_CSUM; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5499 | #if TG3_VLAN_TAG_USED | 
 | 5500 | 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | 
 | 5501 | 		base_flags |= (TXD_FLAG_VLAN | | 
 | 5502 | 			       (vlan_tx_tag_get(skb) << 16)); | 
 | 5503 | #endif | 
 | 5504 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5505 | 	len = skb_headlen(skb); | 
 | 5506 |  | 
 | 5507 | 	/* Queue skb data, a.k.a. the main skb fragment. */ | 
 | 5508 | 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 
 | 5509 | 	if (pci_dma_mapping_error(tp->pdev, mapping)) { | 
| David S. Miller | 90079ce | 2008-09-11 04:52:51 -0700 | [diff] [blame] | 5510 | 		dev_kfree_skb(skb); | 
 | 5511 | 		goto out_unlock; | 
 | 5512 | 	} | 
 | 5513 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5514 | 	tnapi->tx_buffers[entry].skb = skb; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5515 | 	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5516 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 5517 | 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 5518 | 	    !mss && skb->len > ETH_DATA_LEN) | 
 | 5519 | 		base_flags |= TXD_FLAG_JMB_PKT; | 
 | 5520 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5521 | 	tg3_set_txd(tnapi, entry, mapping, len, base_flags, | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5522 | 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); | 
 | 5523 |  | 
 | 5524 | 	entry = NEXT_TX(entry); | 
 | 5525 |  | 
 | 5526 | 	/* Now loop through additional data fragments, and queue them. */ | 
 | 5527 | 	if (skb_shinfo(skb)->nr_frags > 0) { | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5528 | 		last = skb_shinfo(skb)->nr_frags - 1; | 
 | 5529 | 		for (i = 0; i <= last; i++) { | 
 | 5530 | 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
 | 5531 |  | 
 | 5532 | 			len = frag->size; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5533 | 			mapping = pci_map_page(tp->pdev, | 
 | 5534 | 					       frag->page, | 
 | 5535 | 					       frag->page_offset, | 
 | 5536 | 					       len, PCI_DMA_TODEVICE); | 
 | 5537 | 			if (pci_dma_mapping_error(tp->pdev, mapping)) | 
 | 5538 | 				goto dma_error; | 
 | 5539 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5540 | 			tnapi->tx_buffers[entry].skb = NULL; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5541 | 			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | 
 | 5542 | 					   mapping); | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5543 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5544 | 			tg3_set_txd(tnapi, entry, mapping, len, | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5545 | 				    base_flags, (i == last) | (mss << 1)); | 
 | 5546 |  | 
 | 5547 | 			entry = NEXT_TX(entry); | 
 | 5548 | 		} | 
 | 5549 | 	} | 
 | 5550 |  | 
 | 5551 | 	/* Packets are ready, update Tx producer idx local and on card. */ | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5552 | 	tw32_tx_mbox(tnapi->prodmbox, entry); | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5553 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5554 | 	tnapi->tx_prod = entry; | 
 | 5555 | 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5556 | 		netif_tx_stop_queue(txq); | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5557 | 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 5558 | 			netif_tx_wake_queue(txq); | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5559 | 	} | 
 | 5560 |  | 
 | 5561 | out_unlock: | 
| Eric Dumazet | cdd0db0 | 2009-05-28 00:00:41 +0000 | [diff] [blame] | 5562 | 	mmiowb(); | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5563 |  | 
 | 5564 | 	return NETDEV_TX_OK; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5565 |  | 
 | 5566 | dma_error: | 
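	/* Unwind on a mapping failure: unmap the head and the fragments
	 * mapped so far, then drop the skb.
	 */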
 | 5567 | 	last = i; | 
 | 5568 | 	entry = tnapi->tx_prod; | 
 | 5569 | 	tnapi->tx_buffers[entry].skb = NULL; | 
 | 5570 | 	pci_unmap_single(tp->pdev, | 
 | 5571 | 			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), | 
 | 5572 | 			 skb_headlen(skb), | 
 | 5573 | 			 PCI_DMA_TODEVICE); | 
 | 5574 | 	for (i = 0; i <= last; i++) { | 
 | 5575 | 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
 | 5576 | 		entry = NEXT_TX(entry); | 
 | 5577 |  | 
 | 5578 | 		pci_unmap_page(tp->pdev, | 
 | 5579 | 			       pci_unmap_addr(&tnapi->tx_buffers[entry], | 
 | 5580 | 					      mapping), | 
 | 5581 | 			       frag->size, PCI_DMA_TODEVICE); | 
 | 5582 | 	} | 
 | 5583 |  | 
 | 5584 | 	dev_kfree_skb(skb); | 
 | 5585 | 	return NETDEV_TX_OK; | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5586 | } | 
 | 5587 |  | 
| Stephen Hemminger | 61357325 | 2009-08-31 19:50:58 +0000 | [diff] [blame] | 5588 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, | 
 | 5589 | 					  struct net_device *); | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5590 |  | 
 | 5591 | /* Use GSO to work around a rare TSO bug that may be triggered when the | 
 | 5592 |  * TSO header is greater than 80 bytes. | 
 | 5593 |  */ | 
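/* The oversized skb is segmented in software with skb_gso_segment() and
 * each resulting segment is fed back through tg3_start_xmit_dma_bug();
 * the original skb is then freed.
 */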
 | 5594 | static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | 
 | 5595 | { | 
 | 5596 | 	struct sk_buff *segs, *nskb; | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5597 | 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5598 |  | 
 | 5599 | 	/* Estimate the number of fragments in the worst case */ | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5600 | 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5601 | 		netif_stop_queue(tp->dev); | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5602 | 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) | 
| Michael Chan | 7f62ad5 | 2007-02-20 23:25:40 -0800 | [diff] [blame] | 5603 | 			return NETDEV_TX_BUSY; | 
 | 5604 |  | 
 | 5605 | 		netif_wake_queue(tp->dev); | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5606 | 	} | 
 | 5607 |  | 
 | 5608 | 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); | 
| Hirofumi Nakagawa | 801678c | 2008-04-29 01:03:09 -0700 | [diff] [blame] | 5609 | 	if (IS_ERR(segs)) | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5610 | 		goto tg3_tso_bug_end; | 
 | 5611 |  | 
 | 5612 | 	do { | 
 | 5613 | 		nskb = segs; | 
 | 5614 | 		segs = segs->next; | 
 | 5615 | 		nskb->next = NULL; | 
 | 5616 | 		tg3_start_xmit_dma_bug(nskb, tp->dev); | 
 | 5617 | 	} while (segs); | 
 | 5618 |  | 
 | 5619 | tg3_tso_bug_end: | 
 | 5620 | 	dev_kfree_skb(skb); | 
 | 5621 |  | 
 | 5622 | 	return NETDEV_TX_OK; | 
 | 5623 | } | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5624 |  | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5625 | /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and | 
 | 5626 |  * support TG3_FLG2_HW_TSO_1 or firmware TSO only. | 
 | 5627 |  */ | 
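/* This path additionally checks every mapping against the short-DMA,
 * 4GB-boundary and 40-bit-limit erratum tests; if any descriptor would
 * trip one of them, it falls back to tigon3_dma_hwbug_workaround(),
 * which linearizes the packet into a single freshly mapped buffer (or
 * drops it if that fails).
 */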
| Stephen Hemminger | 61357325 | 2009-08-31 19:50:58 +0000 | [diff] [blame] | 5628 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | 
 | 5629 | 					  struct net_device *dev) | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5630 | { | 
 | 5631 | 	struct tg3 *tp = netdev_priv(dev); | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 5632 | 	u32 len, entry, base_flags, mss; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5633 | 	int would_hit_hwbug; | 
| David S. Miller | 90079ce | 2008-09-11 04:52:51 -0700 | [diff] [blame] | 5634 | 	dma_addr_t mapping; | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5635 | 	struct tg3_napi *tnapi; | 
 | 5636 | 	struct netdev_queue *txq; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5637 | 	unsigned int i, last; | 
 | 5638 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5639 |  | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5640 | 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 
 | 5641 | 	tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 5642 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5643 | 		tnapi++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5644 |  | 
| Michael Chan | 00b7050 | 2006-06-17 21:58:45 -0700 | [diff] [blame] | 5645 | 	/* We are running in BH disabled context with netif_tx_lock | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 5646 | 	 * and TX reclaim runs via tp->napi.poll inside of a software | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5647 | 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have | 
 | 5648 | 	 * no IRQ context deadlocks to worry about either.  Rejoice! | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5649 | 	 */ | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5650 | 	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5651 | 		if (!netif_tx_queue_stopped(txq)) { | 
 | 5652 | 			netif_tx_stop_queue(txq); | 
| Stephen Hemminger | 1f064a8 | 2005-12-06 17:36:44 -0800 | [diff] [blame] | 5653 |  | 
 | 5654 | 			/* This is a hard error, log it. */ | 
 | 5655 | 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 
 | 5656 | 			       "queue awake!\n", dev->name); | 
 | 5657 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5658 | 		return NETDEV_TX_BUSY; | 
 | 5659 | 	} | 
 | 5660 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5661 | 	entry = tnapi->tx_prod; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5662 | 	base_flags = 0; | 
| Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 5663 | 	if (skb->ip_summed == CHECKSUM_PARTIAL) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5664 | 		base_flags |= TXD_FLAG_TCPUDP_CSUM; | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5665 |  | 
| Matt Carlson | c13e371 | 2007-05-05 11:50:04 -0700 | [diff] [blame] | 5666 | 	if ((mss = skb_shinfo(skb)->gso_size) != 0) { | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5667 | 		struct iphdr *iph; | 
| Matt Carlson | 92c6b8d | 2009-11-02 14:23:27 +0000 | [diff] [blame] | 5668 | 		u32 tcp_opt_len, ip_tcp_len, hdr_len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5669 |  | 
 | 5670 | 		if (skb_header_cloned(skb) && | 
 | 5671 | 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 
 | 5672 | 			dev_kfree_skb(skb); | 
 | 5673 | 			goto out_unlock; | 
 | 5674 | 		} | 
 | 5675 |  | 
| Arnaldo Carvalho de Melo | ab6a5bb | 2007-03-18 17:43:48 -0700 | [diff] [blame] | 5676 | 		tcp_opt_len = tcp_optlen(skb); | 
| Arnaldo Carvalho de Melo | c9bdd4b | 2007-03-12 20:09:15 -0300 | [diff] [blame] | 5677 | 		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5678 |  | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5679 | 		hdr_len = ip_tcp_len + tcp_opt_len; | 
 | 5680 | 		if (unlikely((ETH_HLEN + hdr_len) > 80) && | 
| Michael Chan | 7f62ad5 | 2007-02-20 23:25:40 -0800 | [diff] [blame] | 5681 | 			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 5682 | 			return (tg3_tso_bug(tp, skb)); | 
 | 5683 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5684 | 		base_flags |= (TXD_FLAG_CPU_PRE_DMA | | 
 | 5685 | 			       TXD_FLAG_CPU_POST_DMA); | 
 | 5686 |  | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5687 | 		iph = ip_hdr(skb); | 
 | 5688 | 		iph->check = 0; | 
 | 5689 | 		iph->tot_len = htons(mss + hdr_len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5690 | 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 
| Arnaldo Carvalho de Melo | aa8223c | 2007-04-10 21:04:22 -0700 | [diff] [blame] | 5691 | 			tcp_hdr(skb)->check = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5692 | 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM; | 
| Arnaldo Carvalho de Melo | aa8223c | 2007-04-10 21:04:22 -0700 | [diff] [blame] | 5693 | 		} else | 
 | 5694 | 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 
 | 5695 | 								 iph->daddr, 0, | 
 | 5696 | 								 IPPROTO_TCP, | 
 | 5697 | 								 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5698 |  | 
| Matt Carlson | 615774f | 2009-11-13 13:03:39 +0000 | [diff] [blame] | 5699 | 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { | 
 | 5700 | 			mss |= (hdr_len & 0xc) << 12; | 
 | 5701 | 			if (hdr_len & 0x10) | 
 | 5702 | 				base_flags |= 0x00000010; | 
 | 5703 | 			base_flags |= (hdr_len & 0x3e0) << 5; | 
 | 5704 | 		} else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) | 
| Matt Carlson | 92c6b8d | 2009-11-02 14:23:27 +0000 | [diff] [blame] | 5705 | 			mss |= hdr_len << 9; | 
 | 5706 | 		else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || | 
 | 5707 | 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5708 | 			if (tcp_opt_len || iph->ihl > 5) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5709 | 				int tsflags; | 
 | 5710 |  | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5711 | 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5712 | 				mss |= (tsflags << 11); | 
 | 5713 | 			} | 
 | 5714 | 		} else { | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5715 | 			if (tcp_opt_len || iph->ihl > 5) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5716 | 				int tsflags; | 
 | 5717 |  | 
| Arnaldo Carvalho de Melo | eddc9ec | 2007-04-20 22:47:35 -0700 | [diff] [blame] | 5718 | 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5719 | 				base_flags |= tsflags << 12; | 
 | 5720 | 			} | 
 | 5721 | 		} | 
 | 5722 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5723 | #if TG3_VLAN_TAG_USED | 
 | 5724 | 	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | 
 | 5725 | 		base_flags |= (TXD_FLAG_VLAN | | 
 | 5726 | 			       (vlan_tx_tag_get(skb) << 16)); | 
 | 5727 | #endif | 
 | 5728 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 5729 | 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && | 
| Matt Carlson | 615774f | 2009-11-13 13:03:39 +0000 | [diff] [blame] | 5730 | 	    !mss && skb->len > ETH_DATA_LEN) | 
 | 5731 | 		base_flags |= TXD_FLAG_JMB_PKT; | 
 | 5732 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5733 | 	len = skb_headlen(skb); | 
 | 5734 |  | 
 | 5735 | 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 
 | 5736 | 	if (pci_dma_mapping_error(tp->pdev, mapping)) { | 
| David S. Miller | 90079ce | 2008-09-11 04:52:51 -0700 | [diff] [blame] | 5737 | 		dev_kfree_skb(skb); | 
 | 5738 | 		goto out_unlock; | 
 | 5739 | 	} | 
 | 5740 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5741 | 	tnapi->tx_buffers[entry].skb = skb; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5742 | 	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5743 |  | 
 | 5744 | 	would_hit_hwbug = 0; | 
 | 5745 |  | 
| Matt Carlson | 92c6b8d | 2009-11-02 14:23:27 +0000 | [diff] [blame] | 5746 | 	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) | 
 | 5747 | 		would_hit_hwbug = 1; | 
 | 5748 |  | 
| Matt Carlson | 0e1406d | 2009-11-02 12:33:33 +0000 | [diff] [blame] | 5749 | 	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | 
 | 5750 | 	    tg3_4g_overflow_test(mapping, len)) | 
| Matt Carlson | 41588ba | 2008-04-19 18:12:33 -0700 | [diff] [blame] | 5751 | 		would_hit_hwbug = 1; | 
| Matt Carlson | 0e1406d | 2009-11-02 12:33:33 +0000 | [diff] [blame] | 5752 |  | 
 | 5753 | 	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | 
 | 5754 | 	    tg3_40bit_overflow_test(tp, mapping, len)) | 
 | 5755 | 		would_hit_hwbug = 1; | 
 | 5756 |  | 
 | 5757 | 	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5758 | 		would_hit_hwbug = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5759 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5760 | 	tg3_set_txd(tnapi, entry, mapping, len, base_flags, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5761 | 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); | 
 | 5762 |  | 
 | 5763 | 	entry = NEXT_TX(entry); | 
 | 5764 |  | 
 | 5765 | 	/* Now loop through additional data fragments, and queue them. */ | 
 | 5766 | 	if (skb_shinfo(skb)->nr_frags > 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5767 | 		last = skb_shinfo(skb)->nr_frags - 1; | 
 | 5768 | 		for (i = 0; i <= last; i++) { | 
 | 5769 | 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
 | 5770 |  | 
 | 5771 | 			len = frag->size; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5772 | 			mapping = pci_map_page(tp->pdev, | 
 | 5773 | 					       frag->page, | 
 | 5774 | 					       frag->page_offset, | 
 | 5775 | 					       len, PCI_DMA_TODEVICE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5776 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5777 | 			tnapi->tx_buffers[entry].skb = NULL; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5778 | 			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | 
 | 5779 | 					   mapping); | 
 | 5780 | 			if (pci_dma_mapping_error(tp->pdev, mapping)) | 
 | 5781 | 				goto dma_error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5782 |  | 
| Matt Carlson | 92c6b8d | 2009-11-02 14:23:27 +0000 | [diff] [blame] | 5783 | 			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && | 
 | 5784 | 			    len <= 8) | 
 | 5785 | 				would_hit_hwbug = 1; | 
 | 5786 |  | 
| Matt Carlson | 0e1406d | 2009-11-02 12:33:33 +0000 | [diff] [blame] | 5787 | 			if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | 
 | 5788 | 			    tg3_4g_overflow_test(mapping, len)) | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5789 | 				would_hit_hwbug = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5790 |  | 
| Matt Carlson | 0e1406d | 2009-11-02 12:33:33 +0000 | [diff] [blame] | 5791 | 			if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | 
 | 5792 | 			    tg3_40bit_overflow_test(tp, mapping, len)) | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 5793 | 				would_hit_hwbug = 1; | 
 | 5794 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5795 | 			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5796 | 				tg3_set_txd(tnapi, entry, mapping, len, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5797 | 					    base_flags, (i == last)|(mss << 1)); | 
 | 5798 | 			else | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5799 | 				tg3_set_txd(tnapi, entry, mapping, len, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5800 | 					    base_flags, (i == last)); | 
 | 5801 |  | 
 | 5802 | 			entry = NEXT_TX(entry); | 
 | 5803 | 		} | 
 | 5804 | 	} | 
 | 5805 |  | 
 | 5806 | 	if (would_hit_hwbug) { | 
 | 5807 | 		u32 last_plus_one = entry; | 
 | 5808 | 		u32 start; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5809 |  | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5810 | 		start = entry - 1 - skb_shinfo(skb)->nr_frags; | 
 | 5811 | 		start &= (TG3_TX_RING_SIZE - 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5812 |  | 
 | 5813 | 		/* If the workaround fails due to memory/mapping | 
 | 5814 | 		 * failure, silently drop this packet. | 
 | 5815 | 		 */ | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5816 | 		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, | 
| Michael Chan | c58ec93 | 2005-09-17 00:46:27 -0700 | [diff] [blame] | 5817 | 						&start, base_flags, mss)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5818 | 			goto out_unlock; | 
 | 5819 |  | 
 | 5820 | 		entry = start; | 
 | 5821 | 	} | 
 | 5822 |  | 
 | 5823 | 	/* Packets are ready, update Tx producer idx local and on card. */ | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5824 | 	tw32_tx_mbox(tnapi->prodmbox, entry); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5825 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5826 | 	tnapi->tx_prod = entry; | 
 | 5827 | 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5828 | 		netif_tx_stop_queue(txq); | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 5829 | 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) | 
| Matt Carlson | 24f4efd | 2009-11-13 13:03:35 +0000 | [diff] [blame] | 5830 | 			netif_tx_wake_queue(txq); | 
| Michael Chan | 51b9146 | 2005-09-01 17:41:28 -0700 | [diff] [blame] | 5831 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5832 |  | 
 | 5833 | out_unlock: | 
| Eric Dumazet | cdd0db0 | 2009-05-28 00:00:41 +0000 | [diff] [blame] | 5834 | 	mmiowb(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5835 |  | 
 | 5836 | 	return NETDEV_TX_OK; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 5837 |  | 
 | 5838 | dma_error: | 
 | 5839 | 	last = i; | 
 | 5840 | 	entry = tnapi->tx_prod; | 
 | 5841 | 	tnapi->tx_buffers[entry].skb = NULL; | 
 | 5842 | 	pci_unmap_single(tp->pdev, | 
 | 5843 | 			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), | 
 | 5844 | 			 skb_headlen(skb), | 
 | 5845 | 			 PCI_DMA_TODEVICE); | 
 | 5846 | 	for (i = 0; i <= last; i++) { | 
 | 5847 | 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
 | 5848 | 		entry = NEXT_TX(entry); | 
 | 5849 |  | 
 | 5850 | 		pci_unmap_page(tp->pdev, | 
 | 5851 | 			       pci_unmap_addr(&tnapi->tx_buffers[entry], | 
 | 5852 | 					      mapping), | 
 | 5853 | 			       frag->size, PCI_DMA_TODEVICE); | 
 | 5854 | 	} | 
 | 5855 |  | 
 | 5856 | 	dev_kfree_skb(skb); | 
 | 5857 | 	return NETDEV_TX_OK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5858 | } | 
 | 5859 |  | 
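/* Update dev->mtu and the MTU-dependent flags.  For an MTU above the
 * standard Ethernet payload, 5780-class chips give up TSO capability
 * while other chips enable the jumbo RX ring; going back to a standard
 * MTU restores the TSO-capable flag and disables the jumbo ring.
 */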
 | 5860 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | 
 | 5861 | 			       int new_mtu) | 
 | 5862 | { | 
 | 5863 | 	dev->mtu = new_mtu; | 
 | 5864 |  | 
| Michael Chan | ef7f5ec | 2005-07-25 12:32:25 -0700 | [diff] [blame] | 5865 | 	if (new_mtu > ETH_DATA_LEN) { | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 5866 | 		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | 
| Michael Chan | ef7f5ec | 2005-07-25 12:32:25 -0700 | [diff] [blame] | 5867 | 			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 
 | 5868 | 			ethtool_op_set_tso(dev, 0); | 
 | 5869 | 		} | 
 | 5870 | 		else | 
 | 5871 | 			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; | 
 | 5872 | 	} else { | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 5873 | 		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 
| Michael Chan | ef7f5ec | 2005-07-25 12:32:25 -0700 | [diff] [blame] | 5874 | 			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 5875 | 		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; | 
| Michael Chan | ef7f5ec | 2005-07-25 12:32:25 -0700 | [diff] [blame] | 5876 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5877 | } | 
 | 5878 |  | 
 | 5879 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) | 
 | 5880 | { | 
 | 5881 | 	struct tg3 *tp = netdev_priv(dev); | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5882 | 	int err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5883 |  | 
 | 5884 | 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) | 
 | 5885 | 		return -EINVAL; | 
 | 5886 |  | 
 | 5887 | 	if (!netif_running(dev)) { | 
 | 5888 | 		/* We'll just catch it later when the | 
 | 5889 | 		 * device is up'd. | 
 | 5890 | 		 */ | 
 | 5891 | 		tg3_set_mtu(dev, tp, new_mtu); | 
 | 5892 | 		return 0; | 
 | 5893 | 	} | 
 | 5894 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 5895 | 	tg3_phy_stop(tp); | 
 | 5896 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5897 | 	tg3_netif_stop(tp); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5898 |  | 
 | 5899 | 	tg3_full_lock(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5900 |  | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 5901 | 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5902 |  | 
 | 5903 | 	tg3_set_mtu(dev, tp, new_mtu); | 
 | 5904 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5905 | 	err = tg3_restart_hw(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5906 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5907 | 	if (!err) | 
 | 5908 | 		tg3_netif_start(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5909 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 5910 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5911 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 5912 | 	if (!err) | 
 | 5913 | 		tg3_phy_start(tp); | 
 | 5914 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 5915 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5916 | } | 
 | 5917 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 5918 | static void tg3_rx_prodring_free(struct tg3 *tp, | 
 | 5919 | 				 struct tg3_rx_prodring_set *tpr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5920 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5921 | 	int i; | 
 | 5922 |  | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 5923 | 	if (tpr != &tp->prodring[0]) { | 
 | 5924 | 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; | 
 | 5925 | 		     i = (i + 1) % TG3_RX_RING_SIZE) | 
 | 5926 | 			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], | 
 | 5927 | 					tp->rx_pkt_map_sz); | 
 | 5928 |  | 
 | 5929 | 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 
 | 5930 | 			for (i = tpr->rx_jmb_cons_idx; | 
 | 5931 | 			     i != tpr->rx_jmb_prod_idx; | 
 | 5932 | 			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { | 
 | 5933 | 				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], | 
 | 5934 | 						TG3_RX_JMB_MAP_SZ); | 
 | 5935 | 			} | 
 | 5936 | 		} | 
 | 5937 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 5938 | 		return; | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 5939 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5940 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 5941 | 	for (i = 0; i < TG3_RX_RING_SIZE; i++) | 
 | 5942 | 		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], | 
 | 5943 | 				tp->rx_pkt_map_sz); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5944 |  | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 5945 | 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 5946 | 		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) | 
 | 5947 | 			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], | 
 | 5948 | 					TG3_RX_JMB_MAP_SZ); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5949 | 	} | 
 | 5950 | } | 
 | 5951 |  | 
 | 5952 | /* Initialize the rx producer rings for packet processing. | 
 | 5953 |  * | 
 | 5954 |  * The chip has been shut down and the driver detached from | 
 | 5955 |  * the networking, so no interrupts or new tx packets will | 
 | 5956 |  * end up in the driver.  tp->{tx,}lock are held and thus | 
 | 5957 |  * we may not sleep. | 
 | 5958 |  */ | 
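/* Only the default ring (prodring[0]) has its descriptors initialized
 * and skbs pre-allocated here; the extra producer rings just have their
 * buffer bookkeeping arrays cleared.
 */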
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 5959 | static int tg3_rx_prodring_alloc(struct tg3 *tp, | 
 | 5960 | 				 struct tg3_rx_prodring_set *tpr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5961 | { | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 5962 | 	u32 i, rx_pkt_dma_sz; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5963 |  | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 5964 | 	tpr->rx_std_cons_idx = 0; | 
 | 5965 | 	tpr->rx_std_prod_idx = 0; | 
 | 5966 | 	tpr->rx_jmb_cons_idx = 0; | 
 | 5967 | 	tpr->rx_jmb_prod_idx = 0; | 
 | 5968 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 5969 | 	if (tpr != &tp->prodring[0]) { | 
 | 5970 | 		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); | 
 | 5971 | 		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) | 
 | 5972 | 			memset(&tpr->rx_jmb_buffers[0], 0, | 
 | 5973 | 			       TG3_RX_JMB_BUFF_RING_SIZE); | 
 | 5974 | 		goto done; | 
 | 5975 | 	} | 
 | 5976 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5977 | 	/* Zero out all descriptors. */ | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 5978 | 	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5979 |  | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 5980 | 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 5981 | 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 5982 | 	    tp->dev->mtu > ETH_DATA_LEN) | 
 | 5983 | 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; | 
 | 5984 | 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); | 
| Michael Chan | 7e72aad | 2005-07-25 12:31:17 -0700 | [diff] [blame] | 5985 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5986 | 	/* Initialize invariants of the rings; we only set this | 
 | 5987 | 	 * stuff once.  This works because the card does not | 
 | 5988 | 	 * write into the rx buffer posting rings. | 
 | 5989 | 	 */ | 
 | 5990 | 	for (i = 0; i < TG3_RX_RING_SIZE; i++) { | 
 | 5991 | 		struct tg3_rx_buffer_desc *rxd; | 
 | 5992 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 5993 | 		rxd = &tpr->rx_std[i]; | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 5994 | 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5995 | 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); | 
 | 5996 | 		rxd->opaque = (RXD_OPAQUE_RING_STD | | 
 | 5997 | 			       (i << RXD_OPAQUE_INDEX_SHIFT)); | 
 | 5998 | 	} | 
 | 5999 |  | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6000 | 	/* Now allocate fresh SKBs for each rx ring. */ | 
 | 6001 | 	for (i = 0; i < tp->rx_pending; i++) { | 
| Matt Carlson | 86b21e5 | 2009-11-13 13:03:45 +0000 | [diff] [blame] | 6002 | 		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6003 | 			printk(KERN_WARNING PFX | 
 | 6004 | 			       "%s: Using a smaller RX standard ring, " | 
 | 6005 | 			       "only %d out of %d buffers were allocated " | 
 | 6006 | 			       "successfully.\n", | 
 | 6007 | 			       tp->dev->name, i, tp->rx_pending); | 
 | 6008 | 			if (i == 0) | 
 | 6009 | 				goto initfail; | 
 | 6010 | 			tp->rx_pending = i; | 
 | 6011 | 			break; | 
 | 6012 | 		} | 
 | 6013 | 	} | 
 | 6014 |  | 
 | 6015 | 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) | 
 | 6016 | 		goto done; | 
 | 6017 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6018 | 	memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6019 |  | 
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 6020 | 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6021 | 		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 
 | 6022 | 			struct tg3_rx_buffer_desc *rxd; | 
 | 6023 |  | 
| Matt Carlson | 79ed5ac | 2009-08-28 14:00:55 +0000 | [diff] [blame] | 6024 | 			rxd = &tpr->rx_jmb[i].std; | 
| Matt Carlson | 287be12 | 2009-08-28 13:58:46 +0000 | [diff] [blame] | 6025 | 			rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6026 | 			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | | 
 | 6027 | 				RXD_FLAG_JUMBO; | 
 | 6028 | 			rxd->opaque = (RXD_OPAQUE_RING_JUMBO | | 
 | 6029 | 			       (i << RXD_OPAQUE_INDEX_SHIFT)); | 
 | 6030 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6031 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6032 | 		for (i = 0; i < tp->rx_jumbo_pending; i++) { | 
| Matt Carlson | 86b21e5 | 2009-11-13 13:03:45 +0000 | [diff] [blame] | 6033 | 			if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, | 
| Matt Carlson | afc081f | 2009-11-13 13:03:43 +0000 | [diff] [blame] | 6034 | 					     i) < 0) { | 
| Michael Chan | 32d8c57 | 2006-07-25 16:38:29 -0700 | [diff] [blame] | 6035 | 				printk(KERN_WARNING PFX | 
 | 6036 | 				       "%s: Using a smaller RX jumbo ring, " | 
 | 6037 | 				       "only %d out of %d buffers were " | 
 | 6038 | 				       "allocated successfully.\n", | 
 | 6039 | 				       tp->dev->name, i, tp->rx_jumbo_pending); | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6040 | 				if (i == 0) | 
 | 6041 | 					goto initfail; | 
| Michael Chan | 32d8c57 | 2006-07-25 16:38:29 -0700 | [diff] [blame] | 6042 | 				tp->rx_jumbo_pending = i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6043 | 				break; | 
| Michael Chan | 32d8c57 | 2006-07-25 16:38:29 -0700 | [diff] [blame] | 6044 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6045 | 		} | 
 | 6046 | 	} | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6047 |  | 
 | 6048 | done: | 
| Michael Chan | 32d8c57 | 2006-07-25 16:38:29 -0700 | [diff] [blame] | 6049 | 	return 0; | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6050 |  | 
 | 6051 | initfail: | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6052 | 	tg3_rx_prodring_free(tp, tpr); | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6053 | 	return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6054 | } | 
 | 6055 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6056 | static void tg3_rx_prodring_fini(struct tg3 *tp, | 
 | 6057 | 				 struct tg3_rx_prodring_set *tpr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6058 | { | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6059 | 	kfree(tpr->rx_std_buffers); | 
 | 6060 | 	tpr->rx_std_buffers = NULL; | 
 | 6061 | 	kfree(tpr->rx_jmb_buffers); | 
 | 6062 | 	tpr->rx_jmb_buffers = NULL; | 
 | 6063 | 	if (tpr->rx_std) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6064 | 		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6065 | 				    tpr->rx_std, tpr->rx_std_mapping); | 
 | 6066 | 		tpr->rx_std = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6067 | 	} | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6068 | 	if (tpr->rx_jmb) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6069 | 		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6070 | 				    tpr->rx_jmb, tpr->rx_jmb_mapping); | 
 | 6071 | 		tpr->rx_jmb = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6072 | 	} | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6073 | } | 
 | 6074 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6075 | static int tg3_rx_prodring_init(struct tg3 *tp, | 
 | 6076 | 				struct tg3_rx_prodring_set *tpr) | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6077 | { | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6078 | 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6079 | 	if (!tpr->rx_std_buffers) | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6080 | 		return -ENOMEM; | 
 | 6081 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6082 | 	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, | 
 | 6083 | 					   &tpr->rx_std_mapping); | 
 | 6084 | 	if (!tpr->rx_std) | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6085 | 		goto err_out; | 
 | 6086 |  | 
 | 6087 | 	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6088 | 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6089 | 					      GFP_KERNEL); | 
 | 6090 | 		if (!tpr->rx_jmb_buffers) | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6091 | 			goto err_out; | 
 | 6092 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6093 | 		tpr->rx_jmb = pci_alloc_consistent(tp->pdev, | 
 | 6094 | 						   TG3_RX_JUMBO_RING_BYTES, | 
 | 6095 | 						   &tpr->rx_jmb_mapping); | 
 | 6096 | 		if (!tpr->rx_jmb) | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6097 | 			goto err_out; | 
 | 6098 | 	} | 
 | 6099 |  | 
 | 6100 | 	return 0; | 
 | 6101 |  | 
 | 6102 | err_out: | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 6103 | 	tg3_rx_prodring_fini(tp, tpr); | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6104 | 	return -ENOMEM; | 
 | 6105 | } | 
 | 6106 |  | 
 | 6107 | /* Free up pending packets in all rx/tx rings. | 
 | 6108 |  * | 
 | 6109 |  * The chip has been shut down and the driver detached from | 
 | 6110 |  * the networking stack, so no interrupts or new tx packets will | 
 | 6111 |  * end up in the driver.  tp->{tx,}lock is not held and we are not | 
 | 6112 |  * in an interrupt context and thus may sleep. | 
 | 6113 |  */ | 
 | 6114 | static void tg3_free_rings(struct tg3 *tp) | 
 | 6115 | { | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6116 | 	int i, j; | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6117 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6118 | 	for (j = 0; j < tp->irq_cnt; j++) { | 
 | 6119 | 		struct tg3_napi *tnapi = &tp->napi[j]; | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6120 |  | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 6121 | 		if (!tnapi->tx_buffers) | 
 | 6122 | 			continue; | 
 | 6123 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6124 | 		for (i = 0; i < TG3_TX_RING_SIZE; ) { | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 6125 | 			struct ring_info *txp; | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6126 | 			struct sk_buff *skb; | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 6127 | 			unsigned int k; | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6128 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6129 | 			txp = &tnapi->tx_buffers[i]; | 
 | 6130 | 			skb = txp->skb; | 
 | 6131 |  | 
 | 6132 | 			if (skb == NULL) { | 
 | 6133 | 				i++; | 
 | 6134 | 				continue; | 
 | 6135 | 			} | 
 | 6136 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 6137 | 			pci_unmap_single(tp->pdev, | 
 | 6138 | 					 pci_unmap_addr(txp, mapping), | 
 | 6139 | 					 skb_headlen(skb), | 
 | 6140 | 					 PCI_DMA_TODEVICE); | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6141 | 			txp->skb = NULL; | 
 | 6142 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 6143 | 			i++; | 
 | 6144 |  | 
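 |  | 			/* The linear head was unmapped above; each fragment | 
 |  | 			 * occupies its own tx ring slot, so unmap them one | 
 |  | 			 * by one, wrapping the index at the ring size. | 
 |  | 			 */ | 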
 | 6145 | 			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { | 
 | 6146 | 				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; | 
 | 6147 | 				pci_unmap_page(tp->pdev, | 
 | 6148 | 					       pci_unmap_addr(txp, mapping), | 
 | 6149 | 					       skb_shinfo(skb)->frags[k].size, | 
 | 6150 | 					       PCI_DMA_TODEVICE); | 
 | 6151 | 				i++; | 
 | 6152 | 			} | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6153 |  | 
 | 6154 | 			dev_kfree_skb_any(skb); | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6155 | 		} | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6156 |  | 
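 |  | 		/* With a single vector there is exactly one producer ring, | 
 |  | 		 * prodring[0].  With multiple vectors, rx vectors 1..irq_cnt-1 | 
 |  | 		 * own prodring[0..irq_cnt-2], so the last index has no | 
 |  | 		 * producer ring of its own to free. | 
 |  | 		 */ | 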
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6157 | 		if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1) | 
 | 6158 | 			tg3_rx_prodring_free(tp, &tp->prodring[j]); | 
 | 6159 | 	} | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6160 | } | 
 | 6161 |  | 
 | 6162 | /* Initialize tx/rx rings for packet processing. | 
 | 6163 |  * | 
 | 6164 |  * The chip has been shut down and the driver detached from | 
 | 6165 |  * the networking stack, so no interrupts or new tx packets will | 
 | 6166 |  * end up in the driver.  tp->{tx,}lock are held and thus | 
 | 6167 |  * we may not sleep. | 
 | 6168 |  */ | 
 | 6169 | static int tg3_init_rings(struct tg3 *tp) | 
 | 6170 | { | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6171 | 	int i; | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 6172 |  | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6173 | 	/* Free up all the SKBs. */ | 
 | 6174 | 	tg3_free_rings(tp); | 
 | 6175 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6176 | 	for (i = 0; i < tp->irq_cnt; i++) { | 
 | 6177 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6178 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6179 | 		tnapi->last_tag = 0; | 
 | 6180 | 		tnapi->last_irq_tag = 0; | 
 | 6181 | 		tnapi->hw_status->status = 0; | 
 | 6182 | 		tnapi->hw_status->status_tag = 0; | 
 | 6183 | 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 
 | 6184 |  | 
 | 6185 | 		tnapi->tx_prod = 0; | 
 | 6186 | 		tnapi->tx_cons = 0; | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 6187 | 		if (tnapi->tx_ring) | 
 | 6188 | 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6189 |  | 
 | 6190 | 		tnapi->rx_rcb_ptr = 0; | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 6191 | 		if (tnapi->rx_rcb) | 
 | 6192 | 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6193 |  | 
 | 6194 | 		if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) && | 
 | 6195 | 			tg3_rx_prodring_alloc(tp, &tp->prodring[i])) | 
 | 6196 | 			return -ENOMEM; | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6197 | 	} | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 6198 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6199 | 	return 0; | 
| Matt Carlson | cf7a729 | 2009-08-28 13:59:57 +0000 | [diff] [blame] | 6200 | } | 
 | 6201 |  | 
 | 6202 | /* | 
 | 6203 |  * Must not be invoked with interrupt sources disabled and | 
 | 6204 |  * the hardware shut down. | 
 | 6205 |  */ | 
 | 6206 | static void tg3_free_consistent(struct tg3 *tp) | 
 | 6207 | { | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6208 | 	int i; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 6209 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6210 | 	for (i = 0; i < tp->irq_cnt; i++) { | 
 | 6211 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 6212 |  | 
 | 6213 | 		if (tnapi->tx_ring) { | 
 | 6214 | 			pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, | 
 | 6215 | 				tnapi->tx_ring, tnapi->tx_desc_mapping); | 
 | 6216 | 			tnapi->tx_ring = NULL; | 
 | 6217 | 		} | 
 | 6218 |  | 
 | 6219 | 		kfree(tnapi->tx_buffers); | 
 | 6220 | 		tnapi->tx_buffers = NULL; | 
 | 6221 |  | 
 | 6222 | 		if (tnapi->rx_rcb) { | 
 | 6223 | 			pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), | 
 | 6224 | 					    tnapi->rx_rcb, | 
 | 6225 | 					    tnapi->rx_rcb_mapping); | 
 | 6226 | 			tnapi->rx_rcb = NULL; | 
 | 6227 | 		} | 
 | 6228 |  | 
 | 6229 | 		if (tnapi->hw_status) { | 
 | 6230 | 			pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, | 
 | 6231 | 					    tnapi->hw_status, | 
 | 6232 | 					    tnapi->status_mapping); | 
 | 6233 | 			tnapi->hw_status = NULL; | 
 | 6234 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6235 | 	} | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6236 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6237 | 	if (tp->hw_stats) { | 
 | 6238 | 		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), | 
 | 6239 | 				    tp->hw_stats, tp->stats_mapping); | 
 | 6240 | 		tp->hw_stats = NULL; | 
 | 6241 | 	} | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6242 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6243 | 	for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) | 
 | 6244 | 		tg3_rx_prodring_fini(tp, &tp->prodring[i]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6245 | } | 
 | 6246 |  | 
 | 6247 | /* | 
 | 6248 |  * Must not be invoked with interrupt sources disabled and | 
 | 6249 |  * the hardware shut down.  Can sleep. | 
 | 6250 |  */ | 
 | 6251 | static int tg3_alloc_consistent(struct tg3 *tp) | 
 | 6252 | { | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6253 | 	int i; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 6254 |  | 
| Matt Carlson | 2b2cdb6 | 2009-11-13 13:03:48 +0000 | [diff] [blame] | 6255 | 	for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) { | 
 | 6256 | 		if (tg3_rx_prodring_init(tp, &tp->prodring[i])) | 
 | 6257 | 			goto err_out; | 
 | 6258 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6259 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6260 | 	tp->hw_stats = pci_alloc_consistent(tp->pdev, | 
 | 6261 | 					    sizeof(struct tg3_hw_stats), | 
 | 6262 | 					    &tp->stats_mapping); | 
 | 6263 | 	if (!tp->hw_stats) | 
 | 6264 | 		goto err_out; | 
 | 6265 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6266 | 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); | 
 | 6267 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6268 | 	for (i = 0; i < tp->irq_cnt; i++) { | 
 | 6269 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
| Matt Carlson | 8d9d7cf | 2009-09-01 13:19:05 +0000 | [diff] [blame] | 6270 | 		struct tg3_hw_status *sblk; | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6271 |  | 
 | 6272 | 		tnapi->hw_status = pci_alloc_consistent(tp->pdev, | 
 | 6273 | 							TG3_HW_STATUS_SIZE, | 
 | 6274 | 							&tnapi->status_mapping); | 
 | 6275 | 		if (!tnapi->hw_status) | 
 | 6276 | 			goto err_out; | 
 | 6277 |  | 
 | 6278 | 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 
| Matt Carlson | 8d9d7cf | 2009-09-01 13:19:05 +0000 | [diff] [blame] | 6279 | 		sblk = tnapi->hw_status; | 
 | 6280 |  | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 6281 | 		/* If multivector TSS is enabled, vector 0 does not handle | 
 | 6282 | 		 * tx interrupts.  Don't allocate any resources for it. | 
 | 6283 | 		 */ | 
 | 6284 | 		if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || | 
 | 6285 | 		    (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { | 
 | 6286 | 			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * | 
 | 6287 | 						    TG3_TX_RING_SIZE, | 
 | 6288 | 						    GFP_KERNEL); | 
 | 6289 | 			if (!tnapi->tx_buffers) | 
 | 6290 | 				goto err_out; | 
 | 6291 |  | 
 | 6292 | 			tnapi->tx_ring = pci_alloc_consistent(tp->pdev, | 
 | 6293 | 							      TG3_TX_RING_BYTES, | 
 | 6294 | 						       &tnapi->tx_desc_mapping); | 
 | 6295 | 			if (!tnapi->tx_ring) | 
 | 6296 | 				goto err_out; | 
 | 6297 | 		} | 
 | 6298 |  | 
| Matt Carlson | 8d9d7cf | 2009-09-01 13:19:05 +0000 | [diff] [blame] | 6299 | 		/* | 
 | 6300 | 		 * When RSS is enabled, the status block format changes | 
 | 6301 | 		 * slightly.  The "rx_jumbo_consumer", "reserved", | 
 | 6302 | 		 * and "rx_mini_consumer" members get mapped to the | 
 | 6303 | 		 * other three rx return ring producer indexes. | 
 | 6304 | 		 */ | 
 | 6305 | 		switch (i) { | 
 | 6306 | 		default: | 
 | 6307 | 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; | 
 | 6308 | 			break; | 
 | 6309 | 		case 2: | 
 | 6310 | 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; | 
 | 6311 | 			break; | 
 | 6312 | 		case 3: | 
 | 6313 | 			tnapi->rx_rcb_prod_idx = &sblk->reserved; | 
 | 6314 | 			break; | 
 | 6315 | 		case 4: | 
 | 6316 | 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; | 
 | 6317 | 			break; | 
 | 6318 | 		} | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6319 |  | 
| Matt Carlson | b196c7e | 2009-11-13 13:03:50 +0000 | [diff] [blame] | 6320 | 		if (tp->irq_cnt == 1) | 
 | 6321 | 			tnapi->prodring = &tp->prodring[0]; | 
 | 6322 | 		else if (i) | 
 | 6323 | 			tnapi->prodring = &tp->prodring[i - 1]; | 
 | 6324 |  | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 6325 | 		/* | 
 | 6326 | 		 * If multivector RSS is enabled, vector 0 does not handle | 
 | 6327 | 		 * rx or tx interrupts.  Don't allocate any resources for it. | 
 | 6328 | 		 */ | 
 | 6329 | 		if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) | 
 | 6330 | 			continue; | 
 | 6331 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6332 | 		tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, | 
 | 6333 | 						     TG3_RX_RCB_RING_BYTES(tp), | 
 | 6334 | 						     &tnapi->rx_rcb_mapping); | 
 | 6335 | 		if (!tnapi->rx_rcb) | 
 | 6336 | 			goto err_out; | 
 | 6337 |  | 
 | 6338 | 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6339 | 	} | 
 | 6340 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6341 | 	return 0; | 
 | 6342 |  | 
 | 6343 | err_out: | 
 | 6344 | 	tg3_free_consistent(tp); | 
 | 6345 | 	return -ENOMEM; | 
 | 6346 | } | 
 | 6347 |  | 
 | 6348 | #define MAX_WAIT_CNT 1000 | 
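 |  | /* The wait loops below poll once every 100 usec, so MAX_WAIT_CNT | 
 |  |  * bounds each wait at roughly 100 milliseconds. | 
 |  |  */ | 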
 | 6349 |  | 
 | 6350 | /* To stop a block, clear the enable bit and poll till it | 
 | 6351 |  * clears.  tp->lock is held. | 
 | 6352 |  */ | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6353 | static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6354 | { | 
 | 6355 | 	unsigned int i; | 
 | 6356 | 	u32 val; | 
 | 6357 |  | 
 | 6358 | 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 
 | 6359 | 		switch (ofs) { | 
 | 6360 | 		case RCVLSC_MODE: | 
 | 6361 | 		case DMAC_MODE: | 
 | 6362 | 		case MBFREE_MODE: | 
 | 6363 | 		case BUFMGR_MODE: | 
 | 6364 | 		case MEMARB_MODE: | 
 | 6365 | 			/* We can't enable/disable these bits on the | 
 | 6366 | 			 * 5705/5750, so just report success. | 
 | 6367 | 			 */ | 
 | 6368 | 			return 0; | 
 | 6369 |  | 
 | 6370 | 		default: | 
 | 6371 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 6372 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6373 | 	} | 
 | 6374 |  | 
 | 6375 | 	val = tr32(ofs); | 
 | 6376 | 	val &= ~enable_bit; | 
 | 6377 | 	tw32_f(ofs, val); | 
 | 6378 |  | 
 | 6379 | 	for (i = 0; i < MAX_WAIT_CNT; i++) { | 
 | 6380 | 		udelay(100); | 
 | 6381 | 		val = tr32(ofs); | 
 | 6382 | 		if ((val & enable_bit) == 0) | 
 | 6383 | 			break; | 
 | 6384 | 	} | 
 | 6385 |  | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6386 | 	if (i == MAX_WAIT_CNT && !silent) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6387 | 		printk(KERN_ERR PFX "tg3_stop_block timed out, " | 
 | 6388 | 		       "ofs=%lx enable_bit=%x\n", | 
 | 6389 | 		       ofs, enable_bit); | 
 | 6390 | 		return -ENODEV; | 
 | 6391 | 	} | 
 | 6392 |  | 
 | 6393 | 	return 0; | 
 | 6394 | } | 
 | 6395 |  | 
 | 6396 | /* tp->lock is held. */ | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6397 | static int tg3_abort_hw(struct tg3 *tp, int silent) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6398 | { | 
 | 6399 | 	int i, err; | 
 | 6400 |  | 
 | 6401 | 	tg3_disable_ints(tp); | 
 | 6402 |  | 
 | 6403 | 	tp->rx_mode &= ~RX_MODE_ENABLE; | 
 | 6404 | 	tw32_f(MAC_RX_MODE, tp->rx_mode); | 
 | 6405 | 	udelay(10); | 
 | 6406 |  | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6407 | 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); | 
 | 6408 | 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); | 
 | 6409 | 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); | 
 | 6410 | 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); | 
 | 6411 | 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); | 
 | 6412 | 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6413 |  | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6414 | 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); | 
 | 6415 | 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); | 
 | 6416 | 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); | 
 | 6417 | 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); | 
 | 6418 | 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); | 
 | 6419 | 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); | 
 | 6420 | 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6421 |  | 
 | 6422 | 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; | 
 | 6423 | 	tw32_f(MAC_MODE, tp->mac_mode); | 
 | 6424 | 	udelay(40); | 
 | 6425 |  | 
 | 6426 | 	tp->tx_mode &= ~TX_MODE_ENABLE; | 
 | 6427 | 	tw32_f(MAC_TX_MODE, tp->tx_mode); | 
 | 6428 |  | 
 | 6429 | 	for (i = 0; i < MAX_WAIT_CNT; i++) { | 
 | 6430 | 		udelay(100); | 
 | 6431 | 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) | 
 | 6432 | 			break; | 
 | 6433 | 	} | 
 | 6434 | 	if (i >= MAX_WAIT_CNT) { | 
 | 6435 | 		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, " | 
 | 6436 | 		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", | 
 | 6437 | 		       tp->dev->name, tr32(MAC_TX_MODE)); | 
| Michael Chan | e6de8ad | 2005-05-05 14:42:41 -0700 | [diff] [blame] | 6438 | 		err |= -ENODEV; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6439 | 	} | 
 | 6440 |  | 
| Michael Chan | e6de8ad | 2005-05-05 14:42:41 -0700 | [diff] [blame] | 6441 | 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6442 | 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); | 
 | 6443 | 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6444 |  | 
 | 6445 | 	tw32(FTQ_RESET, 0xffffffff); | 
 | 6446 | 	tw32(FTQ_RESET, 0x00000000); | 
 | 6447 |  | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 6448 | 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); | 
 | 6449 | 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6450 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6451 | 	for (i = 0; i < tp->irq_cnt; i++) { | 
 | 6452 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 6453 | 		if (tnapi->hw_status) | 
 | 6454 | 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 
 | 6455 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6456 | 	if (tp->hw_stats) | 
 | 6457 | 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); | 
 | 6458 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6459 | 	return err; | 
 | 6460 | } | 
 | 6461 |  | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 6462 | static void tg3_ape_send_event(struct tg3 *tp, u32 event) | 
 | 6463 | { | 
 | 6464 | 	int i; | 
 | 6465 | 	u32 apedata; | 
 | 6466 |  | 
 | 6467 | 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); | 
 | 6468 | 	if (apedata != APE_SEG_SIG_MAGIC) | 
 | 6469 | 		return; | 
 | 6470 |  | 
 | 6471 | 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); | 
| Matt Carlson | 731fd79 | 2008-08-15 14:07:51 -0700 | [diff] [blame] | 6472 | 	if (!(apedata & APE_FW_STATUS_READY)) | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 6473 | 		return; | 
 | 6474 |  | 
 | 6475 | 	/* Wait for up to 1 millisecond for APE to service previous event. */ | 
 | 6476 | 	for (i = 0; i < 10; i++) { | 
 | 6477 | 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) | 
 | 6478 | 			return; | 
 | 6479 |  | 
 | 6480 | 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); | 
 | 6481 |  | 
 | 6482 | 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) | 
 | 6483 | 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, | 
 | 6484 | 					event | APE_EVENT_STATUS_EVENT_PENDING); | 
 | 6485 |  | 
 | 6486 | 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); | 
 | 6487 |  | 
 | 6488 | 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) | 
 | 6489 | 			break; | 
 | 6490 |  | 
 | 6491 | 		udelay(100); | 
 | 6492 | 	} | 
 | 6493 |  | 
 | 6494 | 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) | 
 | 6495 | 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); | 
 | 6496 | } | 
 | 6497 |  | 
 | 6498 | static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | 
 | 6499 | { | 
 | 6500 | 	u32 event; | 
 | 6501 | 	u32 apedata; | 
 | 6502 |  | 
 | 6503 | 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) | 
 | 6504 | 		return; | 
 | 6505 |  | 
 | 6506 | 	switch (kind) { | 
 | 6507 | 		case RESET_KIND_INIT: | 
 | 6508 | 			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, | 
 | 6509 | 					APE_HOST_SEG_SIG_MAGIC); | 
 | 6510 | 			tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, | 
 | 6511 | 					APE_HOST_SEG_LEN_MAGIC); | 
 | 6512 | 			apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); | 
 | 6513 | 			tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); | 
 | 6514 | 			tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, | 
 | 6515 | 					APE_HOST_DRIVER_ID_MAGIC); | 
 | 6516 | 			tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, | 
 | 6517 | 					APE_HOST_BEHAV_NO_PHYLOCK); | 
 | 6518 |  | 
 | 6519 | 			event = APE_EVENT_STATUS_STATE_START; | 
 | 6520 | 			break; | 
 | 6521 | 		case RESET_KIND_SHUTDOWN: | 
| Matt Carlson | b2aee15 | 2008-11-03 16:51:11 -0800 | [diff] [blame] | 6522 | 			/* With the interface we are currently using, | 
 | 6523 | 			 * APE does not track driver state.  Wiping | 
 | 6524 | 			 * out the HOST SEGMENT SIGNATURE forces | 
 | 6525 | 			 * the APE to assume OS absent status. | 
 | 6526 | 			 */ | 
 | 6527 | 			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); | 
 | 6528 |  | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 6529 | 			event = APE_EVENT_STATUS_STATE_UNLOAD; | 
 | 6530 | 			break; | 
 | 6531 | 		case RESET_KIND_SUSPEND: | 
 | 6532 | 			event = APE_EVENT_STATUS_STATE_SUSPEND; | 
 | 6533 | 			break; | 
 | 6534 | 		default: | 
 | 6535 | 			return; | 
 | 6536 | 	} | 
 | 6537 |  | 
 | 6538 | 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; | 
 | 6539 |  | 
 | 6540 | 	tg3_ape_send_event(tp, event); | 
 | 6541 | } | 
 | 6542 |  | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 6543 | /* tp->lock is held. */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6544 | static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) | 
 | 6545 | { | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 6546 | 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, | 
 | 6547 | 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6548 |  | 
 | 6549 | 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 
 | 6550 | 		switch (kind) { | 
 | 6551 | 		case RESET_KIND_INIT: | 
 | 6552 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6553 | 				      DRV_STATE_START); | 
 | 6554 | 			break; | 
 | 6555 |  | 
 | 6556 | 		case RESET_KIND_SHUTDOWN: | 
 | 6557 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6558 | 				      DRV_STATE_UNLOAD); | 
 | 6559 | 			break; | 
 | 6560 |  | 
 | 6561 | 		case RESET_KIND_SUSPEND: | 
 | 6562 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6563 | 				      DRV_STATE_SUSPEND); | 
 | 6564 | 			break; | 
 | 6565 |  | 
 | 6566 | 		default: | 
 | 6567 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 6568 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6569 | 	} | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 6570 |  | 
 | 6571 | 	if (kind == RESET_KIND_INIT || | 
 | 6572 | 	    kind == RESET_KIND_SUSPEND) | 
 | 6573 | 		tg3_ape_driver_state_change(tp, kind); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6574 | } | 
 | 6575 |  | 
 | 6576 | /* tp->lock is held. */ | 
 | 6577 | static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) | 
 | 6578 | { | 
 | 6579 | 	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) { | 
 | 6580 | 		switch (kind) { | 
 | 6581 | 		case RESET_KIND_INIT: | 
 | 6582 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6583 | 				      DRV_STATE_START_DONE); | 
 | 6584 | 			break; | 
 | 6585 |  | 
 | 6586 | 		case RESET_KIND_SHUTDOWN: | 
 | 6587 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6588 | 				      DRV_STATE_UNLOAD_DONE); | 
 | 6589 | 			break; | 
 | 6590 |  | 
 | 6591 | 		default: | 
 | 6592 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 6593 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6594 | 	} | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 6595 |  | 
 | 6596 | 	if (kind == RESET_KIND_SHUTDOWN) | 
 | 6597 | 		tg3_ape_driver_state_change(tp, kind); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6598 | } | 
 | 6599 |  | 
 | 6600 | /* tp->lock is held. */ | 
 | 6601 | static void tg3_write_sig_legacy(struct tg3 *tp, int kind) | 
 | 6602 | { | 
 | 6603 | 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 
 | 6604 | 		switch (kind) { | 
 | 6605 | 		case RESET_KIND_INIT: | 
 | 6606 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6607 | 				      DRV_STATE_START); | 
 | 6608 | 			break; | 
 | 6609 |  | 
 | 6610 | 		case RESET_KIND_SHUTDOWN: | 
 | 6611 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6612 | 				      DRV_STATE_UNLOAD); | 
 | 6613 | 			break; | 
 | 6614 |  | 
 | 6615 | 		case RESET_KIND_SUSPEND: | 
 | 6616 | 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, | 
 | 6617 | 				      DRV_STATE_SUSPEND); | 
 | 6618 | 			break; | 
 | 6619 |  | 
 | 6620 | 		default: | 
 | 6621 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 6622 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6623 | 	} | 
 | 6624 | } | 
 | 6625 |  | 
| Michael Chan | 7a6f436 | 2006-09-27 16:03:31 -0700 | [diff] [blame] | 6626 | static int tg3_poll_fw(struct tg3 *tp) | 
 | 6627 | { | 
 | 6628 | 	int i; | 
 | 6629 | 	u32 val; | 
 | 6630 |  | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 6631 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
| Gary Zambrano | 0ccead1 | 2006-11-14 16:34:00 -0800 | [diff] [blame] | 6632 | 		/* Wait up to 20ms for init done. */ | 
 | 6633 | 		for (i = 0; i < 200; i++) { | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 6634 | 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) | 
 | 6635 | 				return 0; | 
| Gary Zambrano | 0ccead1 | 2006-11-14 16:34:00 -0800 | [diff] [blame] | 6636 | 			udelay(100); | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 6637 | 		} | 
 | 6638 | 		return -ENODEV; | 
 | 6639 | 	} | 
 | 6640 |  | 
| Michael Chan | 7a6f436 | 2006-09-27 16:03:31 -0700 | [diff] [blame] | 6641 | 	/* Wait up to 1 second for firmware initialization to complete. */ | 
 | 6642 | 	for (i = 0; i < 100000; i++) { | 
 | 6643 | 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); | 
 | 6644 | 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) | 
 | 6645 | 			break; | 
 | 6646 | 		udelay(10); | 
 | 6647 | 	} | 
 | 6648 |  | 
 | 6649 | 	/* Chip might not be fitted with firmware.  Some Sun onboard | 
 | 6650 | 	 * parts are configured like that.  So don't signal the timeout | 
 | 6651 | 	 * of the above loop as an error, but do report the lack of | 
 | 6652 | 	 * running firmware once. | 
 | 6653 | 	 */ | 
 | 6654 | 	if (i >= 100000 && | 
 | 6655 | 	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) { | 
 | 6656 | 		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED; | 
 | 6657 |  | 
 | 6658 | 		printk(KERN_INFO PFX "%s: No firmware running.\n", | 
 | 6659 | 		       tp->dev->name); | 
 | 6660 | 	} | 
 | 6661 |  | 
 | 6662 | 	return 0; | 
 | 6663 | } | 
 | 6664 |  | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6665 | /* Save PCI command register before chip reset */ | 
 | 6666 | static void tg3_save_pci_state(struct tg3 *tp) | 
 | 6667 | { | 
| Matt Carlson | 8a6eac9 | 2007-10-21 16:17:55 -0700 | [diff] [blame] | 6668 | 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6669 | } | 
 | 6670 |  | 
 | 6671 | /* Restore PCI state after chip reset */ | 
 | 6672 | static void tg3_restore_pci_state(struct tg3 *tp) | 
 | 6673 | { | 
 | 6674 | 	u32 val; | 
 | 6675 |  | 
 | 6676 | 	/* Re-enable indirect register accesses. */ | 
 | 6677 | 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 
 | 6678 | 			       tp->misc_host_ctrl); | 
 | 6679 |  | 
 | 6680 | 	/* Set MAX PCI retry to zero. */ | 
 | 6681 | 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); | 
 | 6682 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && | 
 | 6683 | 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) | 
 | 6684 | 		val |= PCISTATE_RETRY_SAME_DMA; | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 6685 | 	/* Allow reads and writes to the APE register and memory space. */ | 
 | 6686 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 
 | 6687 | 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR | | 
 | 6688 | 		       PCISTATE_ALLOW_APE_SHMEM_WR; | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6689 | 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); | 
 | 6690 |  | 
| Matt Carlson | 8a6eac9 | 2007-10-21 16:17:55 -0700 | [diff] [blame] | 6691 | 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6692 |  | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 6693 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { | 
 | 6694 | 		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) | 
 | 6695 | 			pcie_set_readrq(tp->pdev, 4096); | 
 | 6696 | 		else { | 
 | 6697 | 			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, | 
 | 6698 | 					      tp->pci_cacheline_sz); | 
 | 6699 | 			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, | 
 | 6700 | 					      tp->pci_lat_timer); | 
 | 6701 | 		} | 
| Michael Chan | 114342f | 2007-10-15 02:12:26 -0700 | [diff] [blame] | 6702 | 	} | 
| Matt Carlson | 5f5c51e | 2007-11-12 21:19:37 -0800 | [diff] [blame] | 6703 |  | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6704 | 	/* Make sure PCI-X relaxed ordering bit is clear. */ | 
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 6705 | 	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 6706 | 		u16 pcix_cmd; | 
 | 6707 |  | 
 | 6708 | 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, | 
 | 6709 | 				     &pcix_cmd); | 
 | 6710 | 		pcix_cmd &= ~PCI_X_CMD_ERO; | 
 | 6711 | 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, | 
 | 6712 | 				      pcix_cmd); | 
 | 6713 | 	} | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6714 |  | 
 | 6715 | 	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6716 |  | 
 | 6717 | 		/* Chip reset on 5780 will reset MSI enable bit, | 
 | 6718 | 		 * so we need to restore it. | 
 | 6719 | 		 */ | 
 | 6720 | 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 
 | 6721 | 			u16 ctrl; | 
 | 6722 |  | 
 | 6723 | 			pci_read_config_word(tp->pdev, | 
 | 6724 | 					     tp->msi_cap + PCI_MSI_FLAGS, | 
 | 6725 | 					     &ctrl); | 
 | 6726 | 			pci_write_config_word(tp->pdev, | 
 | 6727 | 					      tp->msi_cap + PCI_MSI_FLAGS, | 
 | 6728 | 					      ctrl | PCI_MSI_FLAGS_ENABLE); | 
 | 6729 | 			val = tr32(MSGINT_MODE); | 
 | 6730 | 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); | 
 | 6731 | 		} | 
 | 6732 | 	} | 
 | 6733 | } | 
 | 6734 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6735 | static void tg3_stop_fw(struct tg3 *); | 
 | 6736 |  | 
 | 6737 | /* tp->lock is held. */ | 
 | 6738 | static int tg3_chip_reset(struct tg3 *tp) | 
 | 6739 | { | 
 | 6740 | 	u32 val; | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 6741 | 	void (*write_op)(struct tg3 *, u32, u32); | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 6742 | 	int i, err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6743 |  | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 6744 | 	tg3_nvram_lock(tp); | 
 | 6745 |  | 
| Matt Carlson | 77b483f | 2008-08-15 14:07:24 -0700 | [diff] [blame] | 6746 | 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC); | 
 | 6747 |  | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 6748 | 	/* No matching tg3_nvram_unlock() after this because | 
 | 6749 | 	 * chip reset below will undo the nvram lock. | 
 | 6750 | 	 */ | 
 | 6751 | 	tp->nvram_lock_cnt = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6752 |  | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6753 | 	/* GRC_MISC_CFG core clock reset will clear the memory | 
 | 6754 | 	 * enable bit in PCI register 4 and the MSI enable bit | 
 | 6755 | 	 * on some chips, so we save relevant registers here. | 
 | 6756 | 	 */ | 
 | 6757 | 	tg3_save_pci_state(tp); | 
 | 6758 |  | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 6759 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 6760 | 	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 6761 | 		tw32(GRC_FASTBOOT_PC, 0); | 
 | 6762 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6763 | 	/* | 
 | 6764 | 	 * We must avoid the readl() that normally takes place. | 
 | 6765 | 	 * It locks machines, causes machine checks, and other | 
 | 6766 | 	 * fun things.  So, temporarily disable the 5701 | 
 | 6767 | 	 * hardware workaround, while we do the reset. | 
 | 6768 | 	 */ | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 6769 | 	write_op = tp->write32; | 
 | 6770 | 	if (write_op == tg3_write_flush_reg32) | 
 | 6771 | 		tp->write32 = tg3_write32; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6772 |  | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 6773 | 	/* Prevent the irq handler from reading or writing PCI registers | 
 | 6774 | 	 * during chip reset when the memory enable bit in the PCI command | 
 | 6775 | 	 * register may be cleared.  The chip does not generate interrupt | 
 | 6776 | 	 * at this time, but the irq handler may still be called due to irq | 
 | 6777 | 	 * sharing or irqpoll. | 
 | 6778 | 	 */ | 
 | 6779 | 	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING; | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 6780 | 	for (i = 0; i < tp->irq_cnt; i++) { | 
 | 6781 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 6782 | 		if (tnapi->hw_status) { | 
 | 6783 | 			tnapi->hw_status->status = 0; | 
 | 6784 | 			tnapi->hw_status->status_tag = 0; | 
 | 6785 | 		} | 
 | 6786 | 		tnapi->last_tag = 0; | 
 | 6787 | 		tnapi->last_irq_tag = 0; | 
| Michael Chan | b8fa2f3 | 2007-04-06 17:35:37 -0700 | [diff] [blame] | 6788 | 	} | 
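 |  | 	/* Make the CHIP_RESETTING flag and the cleared status blocks | 
 |  | 	 * visible to the irq handler, then wait for any handlers that | 
 |  | 	 * are already running to finish. | 
 |  | 	 */ | 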
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 6789 | 	smp_mb(); | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 6790 |  | 
 | 6791 | 	for (i = 0; i < tp->irq_cnt; i++) | 
 | 6792 | 		synchronize_irq(tp->napi[i].irq_vec); | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 6793 |  | 
| Matt Carlson | 255ca31 | 2009-08-25 10:07:27 +0000 | [diff] [blame] | 6794 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | 
 | 6795 | 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | 
 | 6796 | 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); | 
 | 6797 | 	} | 
 | 6798 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6799 | 	/* do the reset */ | 
 | 6800 | 	val = GRC_MISC_CFG_CORECLK_RESET; | 
 | 6801 |  | 
 | 6802 | 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 
 | 6803 | 		if (tr32(0x7e2c) == 0x60) { | 
 | 6804 | 			tw32(0x7e2c, 0x20); | 
 | 6805 | 		} | 
 | 6806 | 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { | 
 | 6807 | 			tw32(GRC_MISC_CFG, (1 << 29)); | 
 | 6808 | 			val |= (1 << 29); | 
 | 6809 | 		} | 
 | 6810 | 	} | 
 | 6811 |  | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 6812 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 6813 | 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); | 
 | 6814 | 		tw32(GRC_VCPU_EXT_CTRL, | 
 | 6815 | 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); | 
 | 6816 | 	} | 
 | 6817 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6818 | 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 
 | 6819 | 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER; | 
 | 6820 | 	tw32(GRC_MISC_CFG, val); | 
 | 6821 |  | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 6822 | 	/* restore 5701 hardware bug workaround write method */ | 
 | 6823 | 	tp->write32 = write_op; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6824 |  | 
 | 6825 | 	/* Unfortunately, we have to delay before the PCI read back. | 
 | 6826 | 	 * Some 575X chips will not even respond to a PCI cfg access | 
 | 6827 | 	 * when the reset command is given to the chip. | 
 | 6828 | 	 * | 
 | 6829 | 	 * How do these hardware designers expect things to work | 
 | 6830 | 	 * properly if the PCI write is posted for a long period | 
 | 6831 | 	 * of time?  It is always necessary to have some method by | 
 | 6832 | 	 * which a register read back can occur to push out the | 
 | 6833 | 	 * posted write that performs the reset. | 
 | 6834 | 	 * | 
 | 6835 | 	 * For most tg3 variants the trick below was working. | 
 | 6836 | 	 * Ho hum... | 
 | 6837 | 	 */ | 
 | 6838 | 	udelay(120); | 
 | 6839 |  | 
 | 6840 | 	/* Flush PCI posted writes.  The normal MMIO registers | 
 | 6841 | 	 * are inaccessible at this time so this is the only | 
 | 6842 | 	 * way to do this reliably (actually, this is no longer | 
 | 6843 | 	 * the case, see above).  I tried to use indirect | 
 | 6844 | 	 * register read/write but this upset some 5701 variants. | 
 | 6845 | 	 */ | 
 | 6846 | 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); | 
 | 6847 |  | 
 | 6848 | 	udelay(120); | 
 | 6849 |  | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 6850 | 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) { | 
| Matt Carlson | e712699 | 2009-08-25 10:08:16 +0000 | [diff] [blame] | 6851 | 		u16 val16; | 
 | 6852 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6853 | 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { | 
 | 6854 | 			int i; | 
 | 6855 | 			u32 cfg_val; | 
 | 6856 |  | 
 | 6857 | 			/* Wait for link training to complete.  */ | 
 | 6858 | 			for (i = 0; i < 5000; i++) | 
 | 6859 | 				udelay(100); | 
 | 6860 |  | 
 | 6861 | 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); | 
 | 6862 | 			pci_write_config_dword(tp->pdev, 0xc4, | 
 | 6863 | 					       cfg_val | (1 << 15)); | 
 | 6864 | 		} | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 6865 |  | 
| Matt Carlson | e712699 | 2009-08-25 10:08:16 +0000 | [diff] [blame] | 6866 | 		/* Clear the "no snoop" and "relaxed ordering" bits. */ | 
 | 6867 | 		pci_read_config_word(tp->pdev, | 
 | 6868 | 				     tp->pcie_cap + PCI_EXP_DEVCTL, | 
 | 6869 | 				     &val16); | 
 | 6870 | 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | | 
 | 6871 | 			   PCI_EXP_DEVCTL_NOSNOOP_EN); | 
 | 6872 | 		/* | 
 | 6873 | 		 * Older PCIe devices only support the 128 byte | 
 | 6874 | 		 * MPS setting.  Enforce the restriction by clearing the | 
 |  | 		 * payload size field; an encoding of zero selects 128 bytes. | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 6875 | 		 */ | 
| Matt Carlson | e712699 | 2009-08-25 10:08:16 +0000 | [diff] [blame] | 6876 | 		if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || | 
 | 6877 | 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)) | 
 | 6878 | 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 6879 | 		pci_write_config_word(tp->pdev, | 
 | 6880 | 				      tp->pcie_cap + PCI_EXP_DEVCTL, | 
| Matt Carlson | e712699 | 2009-08-25 10:08:16 +0000 | [diff] [blame] | 6881 | 				      val16); | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 6882 |  | 
 | 6883 | 		pcie_set_readrq(tp->pdev, 4096); | 
 | 6884 |  | 
 | 6885 | 		/* Clear error status */ | 
 | 6886 | 		pci_write_config_word(tp->pdev, | 
 | 6887 | 				      tp->pcie_cap + PCI_EXP_DEVSTA, | 
 | 6888 | 				      PCI_EXP_DEVSTA_CED | | 
 | 6889 | 				      PCI_EXP_DEVSTA_NFED | | 
 | 6890 | 				      PCI_EXP_DEVSTA_FED | | 
 | 6891 | 				      PCI_EXP_DEVSTA_URD); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6892 | 	} | 
 | 6893 |  | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6894 | 	tg3_restore_pci_state(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6895 |  | 
| Michael Chan | d18edcb | 2007-03-24 20:57:11 -0700 | [diff] [blame] | 6896 | 	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING; | 
 | 6897 |  | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6898 | 	val = 0; | 
 | 6899 | 	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 6900 | 		val = tr32(MEMARB_MODE); | 
| Michael Chan | ee6a99b | 2007-07-18 21:49:10 -0700 | [diff] [blame] | 6901 | 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6902 |  | 
 | 6903 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { | 
 | 6904 | 		tg3_stop_fw(tp); | 
 | 6905 | 		tw32(0x5000, 0x400); | 
 | 6906 | 	} | 
 | 6907 |  | 
 | 6908 | 	tw32(GRC_MODE, tp->grc_mode); | 
 | 6909 |  | 
 | 6910 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { | 
| Andy Gospodarek | ab0049b | 2007-09-06 20:42:14 +0100 | [diff] [blame] | 6911 | 		val = tr32(0xc4); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6912 |  | 
 | 6913 | 		tw32(0xc4, val | (1 << 15)); | 
 | 6914 | 	} | 
 | 6915 |  | 
 | 6916 | 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && | 
 | 6917 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 
 | 6918 | 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; | 
 | 6919 | 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) | 
 | 6920 | 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; | 
 | 6921 | 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); | 
 | 6922 | 	} | 
 | 6923 |  | 
 | 6924 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 
 | 6925 | 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI; | 
 | 6926 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 6927 | 	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 
 | 6928 | 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII; | 
 | 6929 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
| Matt Carlson | 3bda125 | 2008-08-15 14:08:22 -0700 | [diff] [blame] | 6930 | 	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 
 | 6931 | 		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); | 
 | 6932 | 		if (tp->mac_mode & MAC_MODE_APE_TX_EN) | 
 | 6933 | 			tp->mac_mode |= MAC_MODE_TDE_ENABLE; | 
 | 6934 | 		tw32_f(MAC_MODE, tp->mac_mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6935 | 	} else | 
 | 6936 | 		tw32_f(MAC_MODE, 0); | 
 | 6937 | 	udelay(40); | 
 | 6938 |  | 
| Matt Carlson | 77b483f | 2008-08-15 14:07:24 -0700 | [diff] [blame] | 6939 | 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); | 
 | 6940 |  | 
| Michael Chan | 7a6f436 | 2006-09-27 16:03:31 -0700 | [diff] [blame] | 6941 | 	err = tg3_poll_fw(tp); | 
 | 6942 | 	if (err) | 
 | 6943 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6944 |  | 
| Matt Carlson | 0a9140c | 2009-08-28 12:27:50 +0000 | [diff] [blame] | 6945 | 	tg3_mdio_start(tp); | 
 | 6946 |  | 
| Matt Carlson | 52cdf85 | 2009-11-02 14:25:06 +0000 | [diff] [blame] | 6947 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | 
 | 6948 | 		u8 phy_addr; | 
 | 6949 |  | 
 | 6950 | 		phy_addr = tp->phy_addr; | 
 | 6951 | 		tp->phy_addr = TG3_PHY_PCIE_ADDR; | 
 | 6952 |  | 
 | 6953 | 		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, | 
 | 6954 | 			     TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT); | 
 | 6955 | 		val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL | | 
 | 6956 | 		      TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL | | 
 | 6957 | 		      TG3_PCIEPHY_TX0CTRL1_NB_EN; | 
 | 6958 | 		tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val); | 
 | 6959 | 		udelay(10); | 
 | 6960 |  | 
 | 6961 | 		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, | 
 | 6962 | 			     TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT); | 
 | 6963 | 		val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN | | 
 | 6964 | 		      TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN; | 
 | 6965 | 		tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val); | 
 | 6966 | 		udelay(10); | 
 | 6967 |  | 
 | 6968 | 		tp->phy_addr = phy_addr; | 
 | 6969 | 	} | 
 | 6970 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6971 | 	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 6972 | 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && | 
 | 6973 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 6974 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 
 | 6975 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | 
| Andy Gospodarek | ab0049b | 2007-09-06 20:42:14 +0100 | [diff] [blame] | 6976 | 		val = tr32(0x7c00); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6977 |  | 
 | 6978 | 		tw32(0x7c00, val | (1 << 25)); | 
 | 6979 | 	} | 
 | 6980 |  | 
 | 6981 | 	/* Reprobe ASF enable state.  */ | 
 | 6982 | 	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF; | 
 | 6983 | 	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE; | 
 | 6984 | 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 
 | 6985 | 	if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 
 | 6986 | 		u32 nic_cfg; | 
 | 6987 |  | 
 | 6988 | 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); | 
 | 6989 | 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 
 | 6990 | 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 6991 | 			tp->last_event_jiffies = jiffies; | 
| John W. Linville | cbf4685 | 2005-04-21 17:01:29 -0700 | [diff] [blame] | 6992 | 			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6993 | 				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 
 | 6994 | 		} | 
 | 6995 | 	} | 
 | 6996 |  | 
 | 6997 | 	return 0; | 
 | 6998 | } | 
 | 6999 |  | 
 | 7000 | /* tp->lock is held. */ | 
 | 7001 | static void tg3_stop_fw(struct tg3 *tp) | 
 | 7002 | { | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 7003 | 	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | 
 | 7004 | 	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | 
| Matt Carlson | 7c5026a | 2008-05-02 16:49:29 -0700 | [diff] [blame] | 7005 | 		/* Wait for RX cpu to ACK the previous event. */ | 
 | 7006 | 		tg3_wait_for_event_ack(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7007 |  | 
 | 7008 | 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 7009 |  | 
 | 7010 | 		tg3_generate_fw_event(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7011 |  | 
| Matt Carlson | 7c5026a | 2008-05-02 16:49:29 -0700 | [diff] [blame] | 7012 | 		/* Wait for RX cpu to ACK this event. */ | 
 | 7013 | 		tg3_wait_for_event_ack(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7014 | 	} | 
 | 7015 | } | 
 | 7016 |  | 
 | 7017 | /* tp->lock is held. */ | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 7018 | static int tg3_halt(struct tg3 *tp, int kind, int silent) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7019 | { | 
 | 7020 | 	int err; | 
 | 7021 |  | 
 | 7022 | 	tg3_stop_fw(tp); | 
 | 7023 |  | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 7024 | 	tg3_write_sig_pre_reset(tp, kind); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7025 |  | 
| David S. Miller | b3b7d6b | 2005-05-05 14:40:20 -0700 | [diff] [blame] | 7026 | 	tg3_abort_hw(tp, silent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7027 | 	err = tg3_chip_reset(tp); | 
 | 7028 |  | 
| Matt Carlson | daba2a6 | 2009-04-20 06:58:52 +0000 | [diff] [blame] | 7029 | 	__tg3_set_mac_addr(tp, 0); | 
 | 7030 |  | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 7031 | 	tg3_write_sig_legacy(tp, kind); | 
 | 7032 | 	tg3_write_sig_post_reset(tp, kind); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7033 |  | 
 | 7034 | 	if (err) | 
 | 7035 | 		return err; | 
 | 7036 |  | 
 | 7037 | 	return 0; | 
 | 7038 | } | 
 | 7039 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7040 | #define RX_CPU_SCRATCH_BASE	0x30000 | 
 | 7041 | #define RX_CPU_SCRATCH_SIZE	0x04000 | 
 | 7042 | #define TX_CPU_SCRATCH_BASE	0x34000 | 
 | 7043 | #define TX_CPU_SCRATCH_SIZE	0x04000 | 
 | 7044 |  | 
 | 7045 | /* tp->lock is held. */ | 
 | 7046 | static int tg3_halt_cpu(struct tg3 *tp, u32 offset) | 
 | 7047 | { | 
 | 7048 | 	int i; | 
 | 7049 |  | 
| Eric Sesterhenn | 5d9428d | 2006-04-02 13:52:48 +0200 | [diff] [blame] | 7050 | 	BUG_ON(offset == TX_CPU_BASE && | 
 | 7051 | 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7052 |  | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 7053 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 7054 | 		u32 val = tr32(GRC_VCPU_EXT_CTRL); | 
 | 7055 |  | 
 | 7056 | 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); | 
 | 7057 | 		return 0; | 
 | 7058 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7059 | 	if (offset == RX_CPU_BASE) { | 
 | 7060 | 		for (i = 0; i < 10000; i++) { | 
 | 7061 | 			tw32(offset + CPU_STATE, 0xffffffff); | 
 | 7062 | 			tw32(offset + CPU_MODE,  CPU_MODE_HALT); | 
 | 7063 | 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) | 
 | 7064 | 				break; | 
 | 7065 | 		} | 
 | 7066 |  | 
 | 7067 | 		tw32(offset + CPU_STATE, 0xffffffff); | 
 | 7068 | 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT); | 
 | 7069 | 		udelay(10); | 
 | 7070 | 	} else { | 
 | 7071 | 		for (i = 0; i < 10000; i++) { | 
 | 7072 | 			tw32(offset + CPU_STATE, 0xffffffff); | 
 | 7073 | 			tw32(offset + CPU_MODE,  CPU_MODE_HALT); | 
 | 7074 | 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) | 
 | 7075 | 				break; | 
 | 7076 | 		} | 
 | 7077 | 	} | 
 | 7078 |  | 
 | 7079 | 	if (i >= 10000) { | 
 | 7080 | 		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, " | 
 | 7081 | 		       "and %s CPU\n", | 
 | 7082 | 		       tp->dev->name, | 
 | 7083 | 		       (offset == RX_CPU_BASE ? "RX" : "TX")); | 
 | 7084 | 		return -ENODEV; | 
 | 7085 | 	} | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 7086 |  | 
 | 7087 | 	/* Clear firmware's nvram arbitration. */ | 
 | 7088 | 	if (tp->tg3_flags & TG3_FLAG_NVRAM) | 
 | 7089 | 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7090 | 	return 0; | 
 | 7091 | } | 
 | 7092 |  | 
 | 7093 | struct fw_info { | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7094 | 	unsigned int fw_base; | 
 | 7095 | 	unsigned int fw_len; | 
 | 7096 | 	const __be32 *fw_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7097 | }; | 
 | 7098 |  | 
 | 7099 | /* tp->lock is held. */ | 
 | 7100 | static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, | 
 | 7101 | 				 int cpu_scratch_size, struct fw_info *info) | 
 | 7102 | { | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 7103 | 	int err, lock_err, i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7104 | 	void (*write_op)(struct tg3 *, u32, u32); | 
 | 7105 |  | 
 | 7106 | 	if (cpu_base == TX_CPU_BASE && | 
 | 7107 | 	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 7108 | 		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load " | 
 | 7109 | 		       "TX cpu firmware on %s which is 5705.\n", | 
 | 7110 | 		       tp->dev->name); | 
 | 7111 | 		return -EINVAL; | 
 | 7112 | 	} | 
 | 7113 |  | 
 | 7114 | 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 
 | 7115 | 		write_op = tg3_write_mem; | 
 | 7116 | 	else | 
 | 7117 | 		write_op = tg3_write_indirect_reg32; | 
 | 7118 |  | 
| Michael Chan | 1b62815 | 2005-05-29 14:59:49 -0700 | [diff] [blame] | 7119 | 	/* It is possible that bootcode is still loading at this point. | 
 | 7120 | 	 * Get the nvram lock before halting the cpu. | 
 | 7121 | 	 */ | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 7122 | 	lock_err = tg3_nvram_lock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7123 | 	err = tg3_halt_cpu(tp, cpu_base); | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 7124 | 	if (!lock_err) | 
 | 7125 | 		tg3_nvram_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7126 | 	if (err) | 
 | 7127 | 		goto out; | 
 | 7128 |  | 
 | 7129 | 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) | 
 | 7130 | 		write_op(tp, cpu_scratch_base + i, 0); | 
 | 7131 | 	tw32(cpu_base + CPU_STATE, 0xffffffff); | 
 | 7132 | 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); | 
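 |  | 	/* Copy the image into CPU scratch memory.  Only the low 16 bits | 
 |  | 	 * of fw_base are used as the offset within the scratch area; the | 
 |  | 	 * CPU program counter is later pointed at fw_base itself. | 
 |  | 	 */ | 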
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7133 | 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7134 | 		write_op(tp, (cpu_scratch_base + | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7135 | 			      (info->fw_base & 0xffff) + | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7136 | 			      (i * sizeof(u32))), | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7137 | 			      be32_to_cpu(info->fw_data[i])); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7138 |  | 
 | 7139 | 	err = 0; | 
 | 7140 |  | 
 | 7141 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7142 | 	return err; | 
 | 7143 | } | 
 | 7144 |  | 
 | 7145 | /* tp->lock is held. */ | 
 | 7146 | static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) | 
 | 7147 | { | 
 | 7148 | 	struct fw_info info; | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7149 | 	const __be32 *fw_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7150 | 	int err, i; | 
 | 7151 |  | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7152 | 	fw_data = (void *)tp->fw->data; | 
 | 7153 |  | 
 | 7154 | 	/* The firmware blob starts with version numbers, followed by | 
 | 7155 | 	   the start address and length. We are setting the complete length: | 
 | 7156 | 	   length = end_address_of_bss - start_address_of_text. | 
 | 7157 | 	   The remainder is the blob to be loaded contiguously | 
 | 7158 | 	   from the start address. */ | 
 | 7159 |  | 
 | 7160 | 	info.fw_base = be32_to_cpu(fw_data[1]); | 
 | 7161 | 	info.fw_len = tp->fw->size - 12; | 
 | 7162 | 	info.fw_data = &fw_data[3]; | 
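	/* For reference, the layout implied by the comment and the
	 * assignments above, in big-endian 32-bit words:
	 *   fw_data[0]     version
	 *   fw_data[1]     fw_base (start/load address)
	 *   fw_data[2]     length (end_address_of_bss - start_address_of_text)
	 *   fw_data[3]...  image data loaded at fw_base
	 * fw_len therefore skips the 12-byte (3-word) header.
	 */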
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7163 |  | 
 | 7164 | 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, | 
 | 7165 | 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, | 
 | 7166 | 				    &info); | 
 | 7167 | 	if (err) | 
 | 7168 | 		return err; | 
 | 7169 |  | 
 | 7170 | 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, | 
 | 7171 | 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, | 
 | 7172 | 				    &info); | 
 | 7173 | 	if (err) | 
 | 7174 | 		return err; | 
 | 7175 |  | 
 | 7176 | 	/* Now start up only the RX cpu. */ | 
 | 7177 | 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7178 | 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7179 |  | 
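	/* Verify that the PC latched the firmware entry point; retry the
	 * write up to five times before giving up.
	 */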
 | 7180 | 	for (i = 0; i < 5; i++) { | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7181 | 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7182 | 			break; | 
 | 7183 | 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 
 | 7184 | 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT); | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7185 | 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7186 | 		udelay(1000); | 
 | 7187 | 	} | 
 | 7188 | 	if (i >= 5) { | 
 | 7189 | 		printk(KERN_ERR PFX "tg3_load_firmware fails for %s " | 
 | 7190 | 		       "to set RX CPU PC, is %08x should be %08x\n", | 
 | 7191 | 		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC), | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7192 | 		       info.fw_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7193 | 		return -ENODEV; | 
 | 7194 | 	} | 
 | 7195 | 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); | 
 | 7196 | 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000); | 
 | 7197 |  | 
 | 7198 | 	return 0; | 
 | 7199 | } | 
 | 7200 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7201 | /* 5705 needs a special version of the TSO firmware.  */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7202 |  | 
 | 7203 | /* tp->lock is held. */ | 
 | 7204 | static int tg3_load_tso_firmware(struct tg3 *tp) | 
 | 7205 | { | 
 | 7206 | 	struct fw_info info; | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7207 | 	const __be32 *fw_data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7208 | 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; | 
 | 7209 | 	int err, i; | 
 | 7210 |  | 
 | 7211 | 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 
 | 7212 | 		return 0; | 
 | 7213 |  | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7214 | 	fw_data = (void *)tp->fw->data; | 
 | 7215 |  | 
 | 7216 | 	/* The firmware blob starts with version numbers, followed by | 
 | 7217 | 	   the start address and length. We are setting the complete length: | 
 | 7218 | 	   length = end_address_of_bss - start_address_of_text. | 
 | 7219 | 	   The remainder is the blob to be loaded contiguously | 
 | 7220 | 	   from the start address. */ | 
 | 7221 |  | 
 | 7222 | 	info.fw_base = be32_to_cpu(fw_data[1]); | 
 | 7223 | 	cpu_scratch_size = tp->fw_len; | 
 | 7224 | 	info.fw_len = tp->fw->size - 12; | 
 | 7225 | 	info.fw_data = &fw_data[3]; | 
 | 7226 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7227 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7228 | 		cpu_base = RX_CPU_BASE; | 
 | 7229 | 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7230 | 	} else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7231 | 		cpu_base = TX_CPU_BASE; | 
 | 7232 | 		cpu_scratch_base = TX_CPU_SCRATCH_BASE; | 
 | 7233 | 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE; | 
 | 7234 | 	} | 
 | 7235 |  | 
 | 7236 | 	err = tg3_load_firmware_cpu(tp, cpu_base, | 
 | 7237 | 				    cpu_scratch_base, cpu_scratch_size, | 
 | 7238 | 				    &info); | 
 | 7239 | 	if (err) | 
 | 7240 | 		return err; | 
 | 7241 |  | 
 | 7242 | 	/* Now start up the cpu. */ | 
 | 7243 | 	tw32(cpu_base + CPU_STATE, 0xffffffff); | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7244 | 	tw32_f(cpu_base + CPU_PC, info.fw_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7245 |  | 
 | 7246 | 	for (i = 0; i < 5; i++) { | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7247 | 		if (tr32(cpu_base + CPU_PC) == info.fw_base) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7248 | 			break; | 
 | 7249 | 		tw32(cpu_base + CPU_STATE, 0xffffffff); | 
 | 7250 | 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT); | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7251 | 		tw32_f(cpu_base + CPU_PC, info.fw_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7252 | 		udelay(1000); | 
 | 7253 | 	} | 
 | 7254 | 	if (i >= 5) { | 
 | 7255 | 		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s " | 
 | 7256 | 		       "to set CPU PC, is %08x should be %08x\n", | 
 | 7257 | 		       tp->dev->name, tr32(cpu_base + CPU_PC), | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7258 | 		       info.fw_base); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7259 | 		return -ENODEV; | 
 | 7260 | 	} | 
 | 7261 | 	tw32(cpu_base + CPU_STATE, 0xffffffff); | 
 | 7262 | 	tw32_f(cpu_base + CPU_MODE,  0x00000000); | 
 | 7263 | 	return 0; | 
 | 7264 | } | 
 | 7265 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7266 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7267 | static int tg3_set_mac_addr(struct net_device *dev, void *p) | 
 | 7268 | { | 
 | 7269 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 7270 | 	struct sockaddr *addr = p; | 
| Michael Chan | 986e0ae | 2007-05-05 12:10:20 -0700 | [diff] [blame] | 7271 | 	int err = 0, skip_mac_1 = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7272 |  | 
| Michael Chan | f9804dd | 2005-09-27 12:13:10 -0700 | [diff] [blame] | 7273 | 	if (!is_valid_ether_addr(addr->sa_data)) | 
 | 7274 | 		return -EINVAL; | 
 | 7275 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7276 | 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 
 | 7277 |  | 
| Michael Chan | e75f7c9 | 2006-03-20 21:33:26 -0800 | [diff] [blame] | 7278 | 	if (!netif_running(dev)) | 
 | 7279 | 		return 0; | 
 | 7280 |  | 
| Michael Chan | 58712ef | 2006-04-29 18:58:01 -0700 | [diff] [blame] | 7281 | 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { | 
| Michael Chan | 986e0ae | 2007-05-05 12:10:20 -0700 | [diff] [blame] | 7282 | 		u32 addr0_high, addr0_low, addr1_high, addr1_low; | 
| Michael Chan | 58712ef | 2006-04-29 18:58:01 -0700 | [diff] [blame] | 7283 |  | 
| Michael Chan | 986e0ae | 2007-05-05 12:10:20 -0700 | [diff] [blame] | 7284 | 		addr0_high = tr32(MAC_ADDR_0_HIGH); | 
 | 7285 | 		addr0_low = tr32(MAC_ADDR_0_LOW); | 
 | 7286 | 		addr1_high = tr32(MAC_ADDR_1_HIGH); | 
 | 7287 | 		addr1_low = tr32(MAC_ADDR_1_LOW); | 
 | 7288 |  | 
 | 7289 | 		/* Skip MAC addr 1 if ASF is using it. */ | 
 | 7290 | 		if ((addr0_high != addr1_high || addr0_low != addr1_low) && | 
 | 7291 | 		    !(addr1_high == 0 && addr1_low == 0)) | 
 | 7292 | 			skip_mac_1 = 1; | 
| Michael Chan | 58712ef | 2006-04-29 18:58:01 -0700 | [diff] [blame] | 7293 | 	} | 
| Michael Chan | 986e0ae | 2007-05-05 12:10:20 -0700 | [diff] [blame] | 7294 | 	spin_lock_bh(&tp->lock); | 
 | 7295 | 	__tg3_set_mac_addr(tp, skip_mac_1); | 
 | 7296 | 	spin_unlock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7297 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 7298 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7299 | } | 
 | 7300 |  | 
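/* Each TG3_BDINFO ring control block in NIC SRAM holds the 64-bit host DMA
 * address of the ring, a maxlen/flags word, and, on pre-5705 chips, the
 * address of the descriptors in NIC memory.
 */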
 | 7301 | /* tp->lock is held. */ | 
 | 7302 | static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, | 
 | 7303 | 			   dma_addr_t mapping, u32 maxlen_flags, | 
 | 7304 | 			   u32 nic_addr) | 
 | 7305 | { | 
 | 7306 | 	tg3_write_mem(tp, | 
 | 7307 | 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), | 
 | 7308 | 		      ((u64) mapping >> 32)); | 
 | 7309 | 	tg3_write_mem(tp, | 
 | 7310 | 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), | 
 | 7311 | 		      ((u64) mapping & 0xffffffff)); | 
 | 7312 | 	tg3_write_mem(tp, | 
 | 7313 | 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), | 
 | 7314 | 		       maxlen_flags); | 
 | 7315 |  | 
 | 7316 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 7317 | 		tg3_write_mem(tp, | 
 | 7318 | 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR), | 
 | 7319 | 			      nic_addr); | 
 | 7320 | } | 
 | 7321 |  | 
 | 7322 | static void __tg3_set_rx_mode(struct net_device *); | 
| Michael Chan | d244c89 | 2005-07-05 14:42:33 -0700 | [diff] [blame] | 7323 | static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 7324 | { | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7325 | 	int i; | 
 | 7326 |  | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 7327 | 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7328 | 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); | 
 | 7329 | 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); | 
 | 7330 | 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7331 | 	} else { | 
 | 7332 | 		tw32(HOSTCC_TXCOL_TICKS, 0); | 
 | 7333 | 		tw32(HOSTCC_TXMAX_FRAMES, 0); | 
 | 7334 | 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0); | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 7335 | 	} | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7336 |  | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 7337 | 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | 
 | 7338 | 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | 
 | 7339 | 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | 
 | 7340 | 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | 
 | 7341 | 	} else { | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7342 | 		tw32(HOSTCC_RXCOL_TICKS, 0); | 
 | 7343 | 		tw32(HOSTCC_RXMAX_FRAMES, 0); | 
 | 7344 | 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0); | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 7345 | 	} | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7346 |  | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 7347 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 7348 | 		u32 val = ec->stats_block_coalesce_usecs; | 
 | 7349 |  | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7350 | 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); | 
 | 7351 | 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); | 
 | 7352 |  | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 7353 | 		if (!netif_carrier_ok(tp->dev)) | 
 | 7354 | 			val = 0; | 
 | 7355 |  | 
 | 7356 | 		tw32(HOSTCC_STAT_COAL_TICKS, val); | 
 | 7357 | 	} | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7358 |  | 
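	/* The coalescing registers for the additional interrupt vectors are
	 * laid out in 0x18-byte blocks starting at the _VEC1 offsets; program
	 * the active vectors, then zero the remaining blocks up to irq_max.
	 */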
 | 7359 | 	for (i = 0; i < tp->irq_cnt - 1; i++) { | 
 | 7360 | 		u32 reg; | 
 | 7361 |  | 
 | 7362 | 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; | 
 | 7363 | 		tw32(reg, ec->rx_coalesce_usecs); | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7364 | 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; | 
 | 7365 | 		tw32(reg, ec->rx_max_coalesced_frames); | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7366 | 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; | 
 | 7367 | 		tw32(reg, ec->rx_max_coalesced_frames_irq); | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 7368 |  | 
 | 7369 | 		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { | 
 | 7370 | 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; | 
 | 7371 | 			tw32(reg, ec->tx_coalesce_usecs); | 
 | 7372 | 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; | 
 | 7373 | 			tw32(reg, ec->tx_max_coalesced_frames); | 
 | 7374 | 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; | 
 | 7375 | 			tw32(reg, ec->tx_max_coalesced_frames_irq); | 
 | 7376 | 		} | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7377 | 	} | 
 | 7378 |  | 
 | 7379 | 	for (; i < tp->irq_max - 1; i++) { | 
 | 7380 | 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7381 | 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7382 | 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 7383 |  | 
 | 7384 | 		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { | 
 | 7385 | 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); | 
 | 7386 | 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); | 
 | 7387 | 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 
 | 7388 | 		} | 
| Matt Carlson | b6080e1 | 2009-09-01 13:12:00 +0000 | [diff] [blame] | 7389 | 	} | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 7390 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7391 |  | 
 | 7392 | /* tp->lock is held. */ | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7393 | static void tg3_rings_reset(struct tg3 *tp) | 
 | 7394 | { | 
 | 7395 | 	int i; | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 7396 | 	u32 stblk, txrcb, rxrcb, limit; | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7397 | 	struct tg3_napi *tnapi = &tp->napi[0]; | 
 | 7398 |  | 
 | 7399 | 	/* Disable all transmit rings but the first. */ | 
 | 7400 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 7401 | 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 7402 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
 | 7403 | 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7404 | 	else | 
 | 7405 | 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; | 
 | 7406 |  | 
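	/* The limit computed above covers 16 send BD rings on pre-5705 chips,
	 * 2 on 57765, and a single ring everywhere else; all rings past the
	 * first are marked disabled below.
	 */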
 | 7407 | 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; | 
 | 7408 | 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE) | 
 | 7409 | 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, | 
 | 7410 | 			      BDINFO_FLAGS_DISABLED); | 
 | 7411 |  | 
 | 7412 |  | 
 | 7413 | 	/* Disable all receive return rings but the first. */ | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 7414 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 
 | 7415 | 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; | 
 | 7416 | 	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7417 | 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 7418 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 
 | 7419 | 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7420 | 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; | 
 | 7421 | 	else | 
 | 7422 | 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; | 
 | 7423 |  | 
 | 7424 | 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; | 
 | 7425 | 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) | 
 | 7426 | 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, | 
 | 7427 | 			      BDINFO_FLAGS_DISABLED); | 
 | 7428 |  | 
 | 7429 | 	/* Disable interrupts */ | 
 | 7430 | 	tw32_mailbox_f(tp->napi[0].int_mbox, 1); | 
 | 7431 |  | 
 | 7432 | 	/* Zero mailbox registers. */ | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 7433 | 	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { | 
 | 7434 | 		for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { | 
 | 7435 | 			tp->napi[i].tx_prod = 0; | 
 | 7436 | 			tp->napi[i].tx_cons = 0; | 
 | 7437 | 			tw32_mailbox(tp->napi[i].prodmbox, 0); | 
 | 7438 | 			tw32_rx_mbox(tp->napi[i].consmbox, 0); | 
 | 7439 | 			tw32_mailbox_f(tp->napi[i].int_mbox, 1); | 
 | 7440 | 		} | 
 | 7441 | 	} else { | 
 | 7442 | 		tp->napi[0].tx_prod = 0; | 
 | 7443 | 		tp->napi[0].tx_cons = 0; | 
 | 7444 | 		tw32_mailbox(tp->napi[0].prodmbox, 0); | 
 | 7445 | 		tw32_rx_mbox(tp->napi[0].consmbox, 0); | 
 | 7446 | 	} | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7447 |  | 
 | 7448 | 	/* Make sure the NIC-based send BD rings are disabled. */ | 
 | 7449 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 7450 | 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; | 
 | 7451 | 		for (i = 0; i < 16; i++) | 
 | 7452 | 			tw32_tx_mbox(mbox + i * 8, 0); | 
 | 7453 | 	} | 
 | 7454 |  | 
 | 7455 | 	txrcb = NIC_SRAM_SEND_RCB; | 
 | 7456 | 	rxrcb = NIC_SRAM_RCV_RET_RCB; | 
 | 7457 |  | 
 | 7458 | 	/* Clear status block in ram. */ | 
 | 7459 | 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 
 | 7460 |  | 
 | 7461 | 	/* Set status block DMA address */ | 
 | 7462 | 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, | 
 | 7463 | 	     ((u64) tnapi->status_mapping >> 32)); | 
 | 7464 | 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, | 
 | 7465 | 	     ((u64) tnapi->status_mapping & 0xffffffff)); | 
 | 7466 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 7467 | 	if (tnapi->tx_ring) { | 
 | 7468 | 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, | 
 | 7469 | 			       (TG3_TX_RING_SIZE << | 
 | 7470 | 				BDINFO_FLAGS_MAXLEN_SHIFT), | 
 | 7471 | 			       NIC_SRAM_TX_BUFFER_DESC); | 
 | 7472 | 		txrcb += TG3_BDINFO_SIZE; | 
 | 7473 | 	} | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7474 |  | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 7475 | 	if (tnapi->rx_rcb) { | 
 | 7476 | 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, | 
 | 7477 | 			       (TG3_RX_RCB_RING_SIZE(tp) << | 
 | 7478 | 				BDINFO_FLAGS_MAXLEN_SHIFT), 0); | 
 | 7479 | 		rxrcb += TG3_BDINFO_SIZE; | 
 | 7480 | 	} | 
 | 7481 |  | 
 | 7482 | 	stblk = HOSTCC_STATBLCK_RING1; | 
 | 7483 |  | 
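	/* Status blocks for the additional vectors live at consecutive
	 * 8-byte register pairs starting at HOSTCC_STATBLCK_RING1; each gets
	 * its own DMA address and, where present, its own tx/rx ring RCBs.
	 */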
 | 7484 | 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { | 
 | 7485 | 		u64 mapping = (u64)tnapi->status_mapping; | 
 | 7486 | 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); | 
 | 7487 | 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); | 
 | 7488 |  | 
 | 7489 | 		/* Clear status block in ram. */ | 
 | 7490 | 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 
 | 7491 |  | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 7492 | 		if (tnapi->tx_ring) { | 
 | 7493 | 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, | 
 | 7494 | 				       (TG3_TX_RING_SIZE << | 
 | 7495 | 					BDINFO_FLAGS_MAXLEN_SHIFT), | 
 | 7496 | 				       NIC_SRAM_TX_BUFFER_DESC); | 
 | 7497 | 			txrcb += TG3_BDINFO_SIZE; | 
 | 7498 | 		} | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 7499 |  | 
 | 7500 | 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, | 
 | 7501 | 			       (TG3_RX_RCB_RING_SIZE(tp) << | 
 | 7502 | 				BDINFO_FLAGS_MAXLEN_SHIFT), 0); | 
 | 7503 |  | 
 | 7504 | 		stblk += 8; | 
| Matt Carlson | f77a6a8 | 2009-09-01 13:04:37 +0000 | [diff] [blame] | 7505 | 		rxrcb += TG3_BDINFO_SIZE; | 
 | 7506 | 	} | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7507 | } | 
 | 7508 |  | 
 | 7509 | /* tp->lock is held. */ | 
| Gary Zambrano | 8e7a22e | 2006-04-29 18:59:13 -0700 | [diff] [blame] | 7510 | static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7511 | { | 
 | 7512 | 	u32 val, rdmac_mode; | 
 | 7513 | 	int i, err, limit; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 7514 | 	struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7515 |  | 
 | 7516 | 	tg3_disable_ints(tp); | 
 | 7517 |  | 
 | 7518 | 	tg3_stop_fw(tp); | 
 | 7519 |  | 
 | 7520 | 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); | 
 | 7521 |  | 
 | 7522 | 	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { | 
| Michael Chan | e6de8ad | 2005-05-05 14:42:41 -0700 | [diff] [blame] | 7523 | 		tg3_abort_hw(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7524 | 	} | 
 | 7525 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 7526 | 	if (reset_phy && | 
 | 7527 | 	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) | 
| Michael Chan | d4d2c55 | 2006-03-20 17:47:20 -0800 | [diff] [blame] | 7528 | 		tg3_phy_reset(tp); | 
 | 7529 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7530 | 	err = tg3_chip_reset(tp); | 
 | 7531 | 	if (err) | 
 | 7532 | 		return err; | 
 | 7533 |  | 
 | 7534 | 	tg3_write_sig_legacy(tp, RESET_KIND_INIT); | 
 | 7535 |  | 
| Matt Carlson | bcb37f6 | 2008-11-03 16:52:09 -0800 | [diff] [blame] | 7536 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 7537 | 		val = tr32(TG3_CPMU_CTRL); | 
 | 7538 | 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); | 
 | 7539 | 		tw32(TG3_CPMU_CTRL, val); | 
| Matt Carlson | 9acb961 | 2007-11-12 21:10:06 -0800 | [diff] [blame] | 7540 |  | 
 | 7541 | 		val = tr32(TG3_CPMU_LSPD_10MB_CLK); | 
 | 7542 | 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK; | 
 | 7543 | 		val |= CPMU_LSPD_10MB_MACCLK_6_25; | 
 | 7544 | 		tw32(TG3_CPMU_LSPD_10MB_CLK, val); | 
 | 7545 |  | 
 | 7546 | 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); | 
 | 7547 | 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK; | 
 | 7548 | 		val |= CPMU_LNK_AWARE_MACCLK_6_25; | 
 | 7549 | 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); | 
 | 7550 |  | 
 | 7551 | 		val = tr32(TG3_CPMU_HST_ACC); | 
 | 7552 | 		val &= ~CPMU_HST_ACC_MACCLK_MASK; | 
 | 7553 | 		val |= CPMU_HST_ACC_MACCLK_6_25; | 
 | 7554 | 		tw32(TG3_CPMU_HST_ACC, val); | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 7555 | 	} | 
 | 7556 |  | 
| Matt Carlson | 33466d9 | 2009-04-20 06:57:41 +0000 | [diff] [blame] | 7557 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | 
 | 7558 | 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; | 
 | 7559 | 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | | 
 | 7560 | 		       PCIE_PWR_MGMT_L1_THRESH_4MS; | 
 | 7561 | 		tw32(PCIE_PWR_MGMT_THRESH, val); | 
| Matt Carlson | 521e6b9 | 2009-08-25 10:06:01 +0000 | [diff] [blame] | 7562 |  | 
 | 7563 | 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; | 
 | 7564 | 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); | 
 | 7565 |  | 
 | 7566 | 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); | 
| Matt Carlson | 33466d9 | 2009-04-20 06:57:41 +0000 | [diff] [blame] | 7567 |  | 
| Matt Carlson | f40386c | 2009-11-02 14:24:02 +0000 | [diff] [blame] | 7568 | 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | 
 | 7569 | 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); | 
| Matt Carlson | 255ca31 | 2009-08-25 10:07:27 +0000 | [diff] [blame] | 7570 | 	} | 
 | 7571 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7572 | 	/* This works around an issue with Athlon chipsets on | 
 | 7573 | 	 * B3 tigon3 silicon.  This bit has no effect on any | 
 | 7574 | 	 * other revision.  But do not set this on PCI Express | 
| Matt Carlson | 795d01c | 2007-10-07 23:28:17 -0700 | [diff] [blame] | 7575 | 	 * chips and don't even touch the clocks if the CPMU is present. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7576 | 	 */ | 
| Matt Carlson | 795d01c | 2007-10-07 23:28:17 -0700 | [diff] [blame] | 7577 | 	if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { | 
 | 7578 | 		if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 
 | 7579 | 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; | 
 | 7580 | 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); | 
 | 7581 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7582 |  | 
 | 7583 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && | 
 | 7584 | 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { | 
 | 7585 | 		val = tr32(TG3PCI_PCISTATE); | 
 | 7586 | 		val |= PCISTATE_RETRY_SAME_DMA; | 
 | 7587 | 		tw32(TG3PCI_PCISTATE, val); | 
 | 7588 | 	} | 
 | 7589 |  | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 7590 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 
 | 7591 | 		/* Allow reads and writes to the | 
 | 7592 | 		 * APE register and memory space. | 
 | 7593 | 		 */ | 
 | 7594 | 		val = tr32(TG3PCI_PCISTATE); | 
 | 7595 | 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR | | 
 | 7596 | 		       PCISTATE_ALLOW_APE_SHMEM_WR; | 
 | 7597 | 		tw32(TG3PCI_PCISTATE, val); | 
 | 7598 | 	} | 
 | 7599 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7600 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { | 
 | 7601 | 		/* Enable some hw fixes.  */ | 
 | 7602 | 		val = tr32(TG3PCI_MSI_DATA); | 
 | 7603 | 		val |= (1 << 26) | (1 << 28) | (1 << 29); | 
 | 7604 | 		tw32(TG3PCI_MSI_DATA, val); | 
 | 7605 | 	} | 
 | 7606 |  | 
 | 7607 | 	/* Descriptor ring init may make accesses to the | 
 | 7608 | 	 * NIC SRAM area to setup the TX descriptors, so we | 
 | 7609 | 	 * can only do this after the hardware has been | 
 | 7610 | 	 * successfully reset. | 
 | 7611 | 	 */ | 
| Michael Chan | 32d8c57 | 2006-07-25 16:38:29 -0700 | [diff] [blame] | 7612 | 	err = tg3_init_rings(tp); | 
 | 7613 | 	if (err) | 
 | 7614 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7615 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 7616 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 7617 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | 
| Matt Carlson | cbf9ca6 | 2009-11-13 13:03:40 +0000 | [diff] [blame] | 7618 | 		val = tr32(TG3PCI_DMA_RW_CTRL) & | 
 | 7619 | 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | 
 | 7620 | 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); | 
 | 7621 | 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && | 
 | 7622 | 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 7623 | 		/* This value is determined during the probe time DMA | 
 | 7624 | 		 * engine test, tg3_test_dma. | 
 | 7625 | 		 */ | 
 | 7626 | 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 
 | 7627 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7628 |  | 
 | 7629 | 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | | 
 | 7630 | 			  GRC_MODE_4X_NIC_SEND_RINGS | | 
 | 7631 | 			  GRC_MODE_NO_TX_PHDR_CSUM | | 
 | 7632 | 			  GRC_MODE_NO_RX_PHDR_CSUM); | 
 | 7633 | 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS; | 
| Michael Chan | d2d746f | 2006-04-06 21:45:39 -0700 | [diff] [blame] | 7634 |  | 
 | 7635 | 	/* Pseudo-header checksum is done by hardware logic and not | 
 | 7636 | 	 * the offload processors, so make the chip do the pseudo- | 
 | 7637 | 	 * header checksums on receive.  For transmit it is more | 
 | 7638 | 	 * convenient to do the pseudo-header checksum in software | 
 | 7639 | 	 * as Linux does that on transmit for us in all cases. | 
 | 7640 | 	 */ | 
 | 7641 | 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7642 |  | 
 | 7643 | 	tw32(GRC_MODE, | 
 | 7644 | 	     tp->grc_mode | | 
 | 7645 | 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); | 
 | 7646 |  | 
 | 7647 | 	/* Setup the timer prescaler register.  Clock is always 66MHz. */ | 
 | 7648 | 	val = tr32(GRC_MISC_CFG); | 
 | 7649 | 	val &= ~0xff; | 
 | 7650 | 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); | 
 | 7651 | 	tw32(GRC_MISC_CFG, val); | 
 | 7652 |  | 
 | 7653 | 	/* Initialize MBUF/DESC pool. */ | 
| John W. Linville | cbf4685 | 2005-04-21 17:01:29 -0700 | [diff] [blame] | 7654 | 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7655 | 		/* Do nothing.  */ | 
 | 7656 | 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { | 
 | 7657 | 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); | 
 | 7658 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 
 | 7659 | 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); | 
 | 7660 | 		else | 
 | 7661 | 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); | 
 | 7662 | 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); | 
 | 7663 | 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); | 
 | 7664 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7665 | 	else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { | 
 | 7666 | 		int fw_len; | 
 | 7667 |  | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 7668 | 		fw_len = tp->fw_len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7669 | 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); | 
 | 7670 | 		tw32(BUFMGR_MB_POOL_ADDR, | 
 | 7671 | 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); | 
 | 7672 | 		tw32(BUFMGR_MB_POOL_SIZE, | 
 | 7673 | 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); | 
 | 7674 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7675 |  | 
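	/* Buffer manager watermarks: use the standard set for a normal MTU
	 * and the jumbo set when the MTU exceeds ETH_DATA_LEN.
	 */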
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 7676 | 	if (tp->dev->mtu <= ETH_DATA_LEN) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7677 | 		tw32(BUFMGR_MB_RDMA_LOW_WATER, | 
 | 7678 | 		     tp->bufmgr_config.mbuf_read_dma_low_water); | 
 | 7679 | 		tw32(BUFMGR_MB_MACRX_LOW_WATER, | 
 | 7680 | 		     tp->bufmgr_config.mbuf_mac_rx_low_water); | 
 | 7681 | 		tw32(BUFMGR_MB_HIGH_WATER, | 
 | 7682 | 		     tp->bufmgr_config.mbuf_high_water); | 
 | 7683 | 	} else { | 
 | 7684 | 		tw32(BUFMGR_MB_RDMA_LOW_WATER, | 
 | 7685 | 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); | 
 | 7686 | 		tw32(BUFMGR_MB_MACRX_LOW_WATER, | 
 | 7687 | 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); | 
 | 7688 | 		tw32(BUFMGR_MB_HIGH_WATER, | 
 | 7689 | 		     tp->bufmgr_config.mbuf_high_water_jumbo); | 
 | 7690 | 	} | 
 | 7691 | 	tw32(BUFMGR_DMA_LOW_WATER, | 
 | 7692 | 	     tp->bufmgr_config.dma_low_water); | 
 | 7693 | 	tw32(BUFMGR_DMA_HIGH_WATER, | 
 | 7694 | 	     tp->bufmgr_config.dma_high_water); | 
 | 7695 |  | 
 | 7696 | 	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); | 
 | 7697 | 	for (i = 0; i < 2000; i++) { | 
 | 7698 | 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) | 
 | 7699 | 			break; | 
 | 7700 | 		udelay(10); | 
 | 7701 | 	} | 
 | 7702 | 	if (i >= 2000) { | 
 | 7703 | 		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n", | 
 | 7704 | 		       tp->dev->name); | 
 | 7705 | 		return -ENODEV; | 
 | 7706 | 	} | 
 | 7707 |  | 
 | 7708 | 	/* Setup replenish threshold. */ | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 7709 | 	val = tp->rx_pending / 8; | 
 | 7710 | 	if (val == 0) | 
 | 7711 | 		val = 1; | 
 | 7712 | 	else if (val > tp->rx_std_max_post) | 
 | 7713 | 		val = tp->rx_std_max_post; | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 7714 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 7715 | 		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) | 
 | 7716 | 			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); | 
 | 7717 |  | 
 | 7718 | 		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2)) | 
 | 7719 | 			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2; | 
 | 7720 | 	} | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 7721 |  | 
 | 7722 | 	tw32(RCVBDI_STD_THRESH, val); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7723 |  | 
 | 7724 | 	/* Initialize TG3_BDINFO's at: | 
 | 7725 | 	 *  RCVDBDI_STD_BD:	standard eth size rx ring | 
 | 7726 | 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring | 
 | 7727 | 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work) | 
 | 7728 | 	 * | 
 | 7729 | 	 * like so: | 
 | 7730 | 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring | 
 | 7731 | 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) | | 
 | 7732 | 	 *                              ring attribute flags | 
 | 7733 | 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM | 
 | 7734 | 	 * | 
 | 7735 | 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. | 
 | 7736 | 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. | 
 | 7737 | 	 * | 
 | 7738 | 	 * The size of each ring is fixed in the firmware, but the location is | 
 | 7739 | 	 * configurable. | 
 | 7740 | 	 */ | 
 | 7741 | 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 7742 | 	     ((u64) tpr->rx_std_mapping >> 32)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7743 | 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 7744 | 	     ((u64) tpr->rx_std_mapping & 0xffffffff)); | 
| Matt Carlson | 87668d3 | 2009-11-13 13:03:34 +0000 | [diff] [blame] | 7745 | 	if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) | 
 | 7746 | 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, | 
 | 7747 | 		     NIC_SRAM_RX_BUFFER_DESC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7748 |  | 
| Matt Carlson | fdb72b3 | 2009-08-28 13:57:12 +0000 | [diff] [blame] | 7749 | 	/* Disable the mini ring */ | 
 | 7750 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7751 | 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, | 
 | 7752 | 		     BDINFO_FLAGS_DISABLED); | 
 | 7753 |  | 
| Matt Carlson | fdb72b3 | 2009-08-28 13:57:12 +0000 | [diff] [blame] | 7754 | 	/* Program the jumbo buffer descriptor ring control | 
 | 7755 | 	 * blocks on those devices that have them. | 
 | 7756 | 	 */ | 
| Matt Carlson | 8f666b0 | 2009-08-28 13:58:24 +0000 | [diff] [blame] | 7757 | 	if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && | 
| Matt Carlson | fdb72b3 | 2009-08-28 13:57:12 +0000 | [diff] [blame] | 7758 | 	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7759 | 		/* Setup replenish threshold. */ | 
 | 7760 | 		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); | 
 | 7761 |  | 
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 7762 | 		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7763 | 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 7764 | 			     ((u64) tpr->rx_jmb_mapping >> 32)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7765 | 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 7766 | 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7767 | 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 
| Matt Carlson | 79ed5ac | 2009-08-28 14:00:55 +0000 | [diff] [blame] | 7768 | 			     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | | 
 | 7769 | 			     BDINFO_FLAGS_USE_EXT_RECV); | 
| Matt Carlson | 87668d3 | 2009-11-13 13:03:34 +0000 | [diff] [blame] | 7770 | 			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 7771 | 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, | 
 | 7772 | 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7773 | 		} else { | 
 | 7774 | 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 
 | 7775 | 			     BDINFO_FLAGS_DISABLED); | 
 | 7776 | 		} | 
 | 7777 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 7778 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 7779 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 7780 | 			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | | 
 | 7781 | 			      (RX_STD_MAX_SIZE << 2); | 
 | 7782 | 		else | 
 | 7783 | 			val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT; | 
| Matt Carlson | fdb72b3 | 2009-08-28 13:57:12 +0000 | [diff] [blame] | 7784 | 	} else | 
 | 7785 | 		val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; | 
 | 7786 |  | 
 | 7787 | 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7788 |  | 
| Matt Carlson | 411da64 | 2009-11-13 13:03:46 +0000 | [diff] [blame] | 7789 | 	tpr->rx_std_prod_idx = tp->rx_pending; | 
| Matt Carlson | 66711e6 | 2009-11-13 13:03:49 +0000 | [diff] [blame] | 7790 | 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7791 |  | 
| Matt Carlson | 411da64 | 2009-11-13 13:03:46 +0000 | [diff] [blame] | 7792 | 	tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 7793 | 			  tp->rx_jumbo_pending : 0; | 
| Matt Carlson | 66711e6 | 2009-11-13 13:03:49 +0000 | [diff] [blame] | 7794 | 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7795 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 7796 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 7797 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 7798 | 		tw32(STD_REPLENISH_LWM, 32); | 
 | 7799 | 		tw32(JMB_REPLENISH_LWM, 16); | 
 | 7800 | 	} | 
 | 7801 |  | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7802 | 	tg3_rings_reset(tp); | 
 | 7803 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7804 | 	/* Initialize MAC address and backoff seed. */ | 
| Michael Chan | 986e0ae | 2007-05-05 12:10:20 -0700 | [diff] [blame] | 7805 | 	__tg3_set_mac_addr(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7806 |  | 
 | 7807 | 	/* MTU + ethernet header + FCS + optional VLAN tag */ | 
| Matt Carlson | f7b493e | 2009-02-25 14:21:52 +0000 | [diff] [blame] | 7808 | 	tw32(MAC_RX_MTU_SIZE, | 
 | 7809 | 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7810 |  | 
 | 7811 | 	/* The slot time is changed by tg3_setup_phy if we | 
 | 7812 | 	 * run at gigabit with half duplex. | 
 | 7813 | 	 */ | 
 | 7814 | 	tw32(MAC_TX_LENGTHS, | 
 | 7815 | 	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) | | 
 | 7816 | 	     (6 << TX_LENGTHS_IPG_SHIFT) | | 
 | 7817 | 	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); | 
 | 7818 |  | 
 | 7819 | 	/* Receive rules. */ | 
 | 7820 | 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); | 
 | 7821 | 	tw32(RCVLPC_CONFIG, 0x0181); | 
 | 7822 |  | 
 | 7823 | 	/* Calculate the RDMAC_MODE setting early; we need it to determine | 
 | 7824 | 	 * the RCVLPC_STATE_ENABLE mask. | 
 | 7825 | 	 */ | 
 | 7826 | 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | | 
 | 7827 | 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | | 
 | 7828 | 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | | 
 | 7829 | 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | | 
 | 7830 | 		      RDMAC_MODE_LNGREAD_ENAB); | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 7831 |  | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 7832 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 7833 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
 | 7834 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 7835 | 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | | 
 | 7836 | 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB | | 
 | 7837 | 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB; | 
 | 7838 |  | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 7839 | 	/* If statement applies to 5705 and 5750 PCI devices only */ | 
 | 7840 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 
 | 7841 | 	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || | 
 | 7842 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7843 | 		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && | 
| Matt Carlson | c13e371 | 2007-05-05 11:50:04 -0700 | [diff] [blame] | 7844 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7845 | 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; | 
 | 7846 | 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && | 
 | 7847 | 			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { | 
 | 7848 | 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; | 
 | 7849 | 		} | 
 | 7850 | 	} | 
 | 7851 |  | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 7852 | 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) | 
 | 7853 | 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; | 
 | 7854 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7855 | 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 
| Matt Carlson | 027455a | 2008-12-21 20:19:30 -0800 | [diff] [blame] | 7856 | 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; | 
 | 7857 |  | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 7858 | 	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || | 
 | 7859 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
| Matt Carlson | 027455a | 2008-12-21 20:19:30 -0800 | [diff] [blame] | 7860 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 
 | 7861 | 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7862 |  | 
 | 7863 | 	/* Receive/send statistics. */ | 
| Michael Chan | 1661394 | 2006-06-29 20:15:13 -0700 | [diff] [blame] | 7864 | 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 
 | 7865 | 		val = tr32(RCVLPC_STATS_ENABLE); | 
 | 7866 | 		val &= ~RCVLPC_STATSENAB_DACK_FIX; | 
 | 7867 | 		tw32(RCVLPC_STATS_ENABLE, val); | 
 | 7868 | 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && | 
 | 7869 | 		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7870 | 		val = tr32(RCVLPC_STATS_ENABLE); | 
 | 7871 | 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; | 
 | 7872 | 		tw32(RCVLPC_STATS_ENABLE, val); | 
 | 7873 | 	} else { | 
 | 7874 | 		tw32(RCVLPC_STATS_ENABLE, 0xffffff); | 
 | 7875 | 	} | 
 | 7876 | 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); | 
 | 7877 | 	tw32(SNDDATAI_STATSENAB, 0xffffff); | 
 | 7878 | 	tw32(SNDDATAI_STATSCTRL, | 
 | 7879 | 	     (SNDDATAI_SCTRL_ENABLE | | 
 | 7880 | 	      SNDDATAI_SCTRL_FASTUPD)); | 
 | 7881 |  | 
 | 7882 | 	/* Setup host coalescing engine. */ | 
 | 7883 | 	tw32(HOSTCC_MODE, 0); | 
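	/* Wait for the engine to report as disabled before reprogramming it
	 * (up to 2000 polls at 10 usec each, i.e. roughly 20 ms).
	 */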
 | 7884 | 	for (i = 0; i < 2000; i++) { | 
 | 7885 | 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) | 
 | 7886 | 			break; | 
 | 7887 | 		udelay(10); | 
 | 7888 | 	} | 
 | 7889 |  | 
| Michael Chan | d244c89 | 2005-07-05 14:42:33 -0700 | [diff] [blame] | 7890 | 	__tg3_set_coalesce(tp, &tp->coal); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7891 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7892 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 7893 | 		/* Status/statistics block address.  See tg3_timer, | 
 | 7894 | 		 * the tg3_periodic_fetch_stats call there, and | 
 | 7895 | 		 * tg3_get_stats to see how this works for 5705/5750 chips. | 
 | 7896 | 		 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7897 | 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, | 
 | 7898 | 		     ((u64) tp->stats_mapping >> 32)); | 
 | 7899 | 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, | 
 | 7900 | 		     ((u64) tp->stats_mapping & 0xffffffff)); | 
 | 7901 | 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7902 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7903 | 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); | 
| Matt Carlson | 2d31eca | 2009-09-01 12:53:31 +0000 | [diff] [blame] | 7904 |  | 
 | 7905 | 		/* Clear statistics and status block memory areas */ | 
 | 7906 | 		for (i = NIC_SRAM_STATS_BLK; | 
 | 7907 | 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; | 
 | 7908 | 		     i += sizeof(u32)) { | 
 | 7909 | 			tg3_write_mem(tp, i, 0); | 
 | 7910 | 			udelay(40); | 
 | 7911 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7912 | 	} | 
 | 7913 |  | 
 | 7914 | 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); | 
 | 7915 |  | 
 | 7916 | 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); | 
 | 7917 | 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); | 
 | 7918 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 7919 | 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); | 
 | 7920 |  | 
| Michael Chan | c94e394 | 2005-09-27 12:12:42 -0700 | [diff] [blame] | 7921 | 	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 
 | 7922 | 		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 
 | 7923 | 		/* reset to prevent losing 1st rx packet intermittently */ | 
 | 7924 | 		tw32_f(MAC_RX_MODE, RX_MODE_RESET); | 
 | 7925 | 		udelay(10); | 
 | 7926 | 	} | 
 | 7927 |  | 
| Matt Carlson | 3bda125 | 2008-08-15 14:08:22 -0700 | [diff] [blame] | 7928 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 
 | 7929 | 		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; | 
 | 7930 | 	else | 
 | 7931 | 		tp->mac_mode = 0; | 
 | 7932 | 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7933 | 		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 7934 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 
 | 7935 | 	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 
 | 7936 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) | 
 | 7937 | 		tp->mac_mode |= MAC_MODE_LINK_POLARITY; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7938 | 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); | 
 | 7939 | 	udelay(40); | 
 | 7940 |  | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 7941 | 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 7942 | 	 * If TG3_FLG2_IS_NIC is zero, we should read the | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 7943 | 	 * register to preserve the GPIO settings for LOMs. The GPIOs, | 
 | 7944 | 	 * whether used as inputs or outputs, are set by boot code after | 
 | 7945 | 	 * reset. | 
 | 7946 | 	 */ | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 7947 | 	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) { | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 7948 | 		u32 gpio_mask; | 
 | 7949 |  | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 7950 | 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | | 
 | 7951 | 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | | 
 | 7952 | 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; | 
| Michael Chan | 3e7d83b | 2005-04-21 17:10:36 -0700 | [diff] [blame] | 7953 |  | 
 | 7954 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 
 | 7955 | 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | | 
 | 7956 | 				     GRC_LCLCTRL_GPIO_OUTPUT3; | 
 | 7957 |  | 
| Michael Chan | af36e6b | 2006-03-23 01:28:06 -0800 | [diff] [blame] | 7958 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 
 | 7959 | 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; | 
 | 7960 |  | 
| Gary Zambrano | aaf8446 | 2007-05-05 11:51:45 -0700 | [diff] [blame] | 7961 | 		tp->grc_local_ctrl &= ~gpio_mask; | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 7962 | 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; | 
 | 7963 |  | 
 | 7964 | 		/* GPIO1 must be driven high for eeprom write protect */ | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 7965 | 		if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) | 
 | 7966 | 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | | 
 | 7967 | 					       GRC_LCLCTRL_GPIO_OUTPUT1); | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 7968 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7969 | 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 
 | 7970 | 	udelay(100); | 
 | 7971 |  | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 7972 | 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { | 
 | 7973 | 		val = tr32(MSGINT_MODE); | 
 | 7974 | 		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; | 
 | 7975 | 		tw32(MSGINT_MODE, val); | 
 | 7976 | 	} | 
 | 7977 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7978 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 7979 | 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); | 
 | 7980 | 		udelay(40); | 
 | 7981 | 	} | 
 | 7982 |  | 
 | 7983 | 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | | 
 | 7984 | 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | | 
 | 7985 | 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | | 
 | 7986 | 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | | 
 | 7987 | 	       WDMAC_MODE_LNGREAD_ENAB); | 
 | 7988 |  | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 7989 | 	/* If statement applies to 5705 and 5750 PCI devices only */ | 
 | 7990 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 
 | 7991 | 	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || | 
 | 7992 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { | 
| Matt Carlson | 29ea095 | 2009-08-25 10:07:54 +0000 | [diff] [blame] | 7993 | 		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 7994 | 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || | 
 | 7995 | 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { | 
 | 7996 | 			/* nothing */ | 
 | 7997 | 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && | 
 | 7998 | 			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 
 | 7999 | 			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { | 
 | 8000 | 			val |= WDMAC_MODE_RX_ACCEL; | 
 | 8001 | 		} | 
 | 8002 | 	} | 
 | 8003 |  | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 8004 | 	/* Enable host coalescing bug fix */ | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 8005 | 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 8006 | 		val |= WDMAC_MODE_STATUS_TAG_FIX; | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 8007 |  | 
| Matt Carlson | 788a035 | 2009-11-02 14:26:03 +0000 | [diff] [blame] | 8008 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 
 | 8009 | 		val |= WDMAC_MODE_BURST_ALL_DATA; | 
 | 8010 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8011 | 	tw32_f(WDMAC_MODE, val); | 
 | 8012 | 	udelay(40); | 
 | 8013 |  | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 8014 | 	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 
 | 8015 | 		u16 pcix_cmd; | 
 | 8016 |  | 
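		/* Bump the PCI-X maximum memory read byte count to 2K on the
		 * 5703 and 5704; on the 5704 the maximum split transactions
		 * field is cleared as well.
		 */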
 | 8017 | 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, | 
 | 8018 | 				     &pcix_cmd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8019 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 8020 | 			pcix_cmd &= ~PCI_X_CMD_MAX_READ; | 
 | 8021 | 			pcix_cmd |= PCI_X_CMD_READ_2K; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8022 | 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 8023 | 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); | 
 | 8024 | 			pcix_cmd |= PCI_X_CMD_READ_2K; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8025 | 		} | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 8026 | 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, | 
 | 8027 | 				      pcix_cmd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8028 | 	} | 
 | 8029 |  | 
 | 8030 | 	tw32_f(RDMAC_MODE, rdmac_mode); | 
 | 8031 | 	udelay(40); | 
 | 8032 |  | 
 | 8033 | 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); | 
 | 8034 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 8035 | 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 8036 |  | 
 | 8037 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) | 
 | 8038 | 		tw32(SNDDATAC_MODE, | 
 | 8039 | 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); | 
 | 8040 | 	else | 
 | 8041 | 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); | 
 | 8042 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8043 | 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); | 
 | 8044 | 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); | 
 | 8045 | 	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); | 
 | 8046 | 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8047 | 	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 
 | 8048 | 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8049 | 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 8050 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8051 | 		val |= SNDBDI_MODE_MULTI_TXQ_EN; | 
 | 8052 | 	tw32(SNDBDI_MODE, val); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8053 | 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); | 
 | 8054 |  | 
 | 8055 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { | 
 | 8056 | 		err = tg3_load_5701_a0_firmware_fix(tp); | 
 | 8057 | 		if (err) | 
 | 8058 | 			return err; | 
 | 8059 | 	} | 
 | 8060 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8061 | 	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { | 
 | 8062 | 		err = tg3_load_tso_firmware(tp); | 
 | 8063 | 		if (err) | 
 | 8064 | 			return err; | 
 | 8065 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8066 |  | 
 | 8067 | 	tp->tx_mode = TX_MODE_ENABLE; | 
 | 8068 | 	tw32_f(MAC_TX_MODE, tp->tx_mode); | 
 | 8069 | 	udelay(100); | 
 | 8070 |  | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8071 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) { | 
 | 8072 | 		u32 reg = MAC_RSS_INDIR_TBL_0; | 
 | 8073 | 		u8 *ent = (u8 *)&val; | 
 | 8074 |  | 
 | 8075 | 		/* Setup the indirection table */ | 
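		/* Worked example (illustrative): with five MSI-X vectors the
		 * loop below sees irq_cnt - 1 = 4 rx rings, so table entries
		 * cycle 0,1,2,3,0,1,...  Four one-byte entries are packed
		 * into each 32-bit register, so entries 0-3 land in
		 * MAC_RSS_INDIR_TBL_0, entries 4-7 in the register 4 bytes
		 * above it, and so on.
		 */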
 | 8076 | 		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { | 
 | 8077 | 			int idx = i % sizeof(val); | 
 | 8078 |  | 
 | 8079 | 			ent[idx] = i % (tp->irq_cnt - 1); | 
 | 8080 | 			if (idx == sizeof(val) - 1) { | 
 | 8081 | 				tw32(reg, val); | 
 | 8082 | 				reg += 4; | 
 | 8083 | 			} | 
 | 8084 | 		} | 
 | 8085 |  | 
 | 8086 | 		/* Setup the "secret" hash key. */ | 
 | 8087 | 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); | 
 | 8088 | 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); | 
 | 8089 | 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); | 
 | 8090 | 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985); | 
 | 8091 | 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); | 
 | 8092 | 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); | 
 | 8093 | 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); | 
 | 8094 | 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); | 
 | 8095 | 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); | 
 | 8096 | 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); | 
 | 8097 | 	} | 
 | 8098 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8099 | 	tp->rx_mode = RX_MODE_ENABLE; | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 8100 | 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 
| Michael Chan | af36e6b | 2006-03-23 01:28:06 -0800 | [diff] [blame] | 8101 | 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; | 
 | 8102 |  | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8103 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) | 
 | 8104 | 		tp->rx_mode |= RX_MODE_RSS_ENABLE | | 
 | 8105 | 			       RX_MODE_RSS_ITBL_HASH_BITS_7 | | 
 | 8106 | 			       RX_MODE_RSS_IPV6_HASH_EN | | 
 | 8107 | 			       RX_MODE_RSS_TCP_IPV6_HASH_EN | | 
 | 8108 | 			       RX_MODE_RSS_IPV4_HASH_EN | | 
 | 8109 | 			       RX_MODE_RSS_TCP_IPV4_HASH_EN; | 
 | 8110 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8111 | 	tw32_f(MAC_RX_MODE, tp->rx_mode); | 
 | 8112 | 	udelay(10); | 
 | 8113 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8114 | 	tw32(MAC_LED_CTRL, tp->led_ctrl); | 
 | 8115 |  | 
 | 8116 | 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); | 
| Michael Chan | c94e394 | 2005-09-27 12:12:42 -0700 | [diff] [blame] | 8117 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8118 | 		tw32_f(MAC_RX_MODE, RX_MODE_RESET); | 
 | 8119 | 		udelay(10); | 
 | 8120 | 	} | 
 | 8121 | 	tw32_f(MAC_RX_MODE, tp->rx_mode); | 
 | 8122 | 	udelay(10); | 
 | 8123 |  | 
 | 8124 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 
 | 8125 | 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && | 
 | 8126 | 			!(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) { | 
 | 8127 | 			/* Set drive transmission level to 1.2V, but only | 
 | 8128 | 			 * if the signal pre-emphasis bit is not set. | 
 | 8129 | 			val = tr32(MAC_SERDES_CFG); | 
 | 8130 | 			val &= 0xfffff000; | 
 | 8131 | 			val |= 0x880; | 
 | 8132 | 			tw32(MAC_SERDES_CFG, val); | 
 | 8133 | 		} | 
 | 8134 | 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) | 
 | 8135 | 			tw32(MAC_SERDES_CFG, 0x616000); | 
 | 8136 | 	} | 
 | 8137 |  | 
 | 8138 | 	/* Prevent chip from dropping frames when flow control | 
 | 8139 | 	 * is enabled. | 
 | 8140 | 	 */ | 
 | 8141 | 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2); | 
 | 8142 |  | 
 | 8143 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && | 
 | 8144 | 	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 
 | 8145 | 		/* Use hardware link auto-negotiation */ | 
 | 8146 | 		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; | 
 | 8147 | 	} | 
 | 8148 |  | 
| Michael Chan | d4d2c55 | 2006-03-20 17:47:20 -0800 | [diff] [blame] | 8149 | 	if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && | 
 | 8150 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { | 
 | 8151 | 		u32 tmp; | 
 | 8152 |  | 
 | 8153 | 		tmp = tr32(SERDES_RX_CTRL); | 
 | 8154 | 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); | 
 | 8155 | 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; | 
 | 8156 | 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; | 
 | 8157 | 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 
 | 8158 | 	} | 
 | 8159 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 8160 | 	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { | 
 | 8161 | 		if (tp->link_config.phy_is_low_power) { | 
 | 8162 | 			tp->link_config.phy_is_low_power = 0; | 
 | 8163 | 			tp->link_config.speed = tp->link_config.orig_speed; | 
 | 8164 | 			tp->link_config.duplex = tp->link_config.orig_duplex; | 
 | 8165 | 			tp->link_config.autoneg = tp->link_config.orig_autoneg; | 
 | 8166 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8167 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 8168 | 		err = tg3_setup_phy(tp, 0); | 
 | 8169 | 		if (err) | 
 | 8170 | 			return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8171 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 8172 | 		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 8173 | 		    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) { | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 8174 | 			u32 tmp; | 
 | 8175 |  | 
 | 8176 | 			/* Clear CRC stats. */ | 
 | 8177 | 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { | 
 | 8178 | 				tg3_writephy(tp, MII_TG3_TEST1, | 
 | 8179 | 					     tmp | MII_TG3_TEST1_CRC_EN); | 
 | 8180 | 				tg3_readphy(tp, 0x14, &tmp); | 
 | 8181 | 			} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8182 | 		} | 
 | 8183 | 	} | 
 | 8184 |  | 
 | 8185 | 	__tg3_set_rx_mode(tp->dev); | 
 | 8186 |  | 
 | 8187 | 	/* Initialize receive rules. */ | 
 | 8188 | 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK); | 
 | 8189 | 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); | 
 | 8190 | 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK); | 
 | 8191 | 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); | 
 | 8192 |  | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 8193 | 	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 8194 | 	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8195 | 		limit = 8; | 
 | 8196 | 	else | 
 | 8197 | 		limit = 16; | 
 | 8198 | 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) | 
 | 8199 | 		limit -= 4; | 
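	/* The cases below fall through on purpose: starting from the highest
	 * rule this configuration supports, each unused rule/value register
	 * pair is cleared; the commented-out and empty cases at the bottom
	 * are left untouched deliberately.
	 */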
 | 8200 | 	switch (limit) { | 
 | 8201 | 	case 16: | 
 | 8202 | 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0); | 
 | 8203 | 	case 15: | 
 | 8204 | 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0); | 
 | 8205 | 	case 14: | 
 | 8206 | 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0); | 
 | 8207 | 	case 13: | 
 | 8208 | 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0); | 
 | 8209 | 	case 12: | 
 | 8210 | 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0); | 
 | 8211 | 	case 11: | 
 | 8212 | 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0); | 
 | 8213 | 	case 10: | 
 | 8214 | 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0); | 
 | 8215 | 	case 9: | 
 | 8216 | 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0); | 
 | 8217 | 	case 8: | 
 | 8218 | 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0); | 
 | 8219 | 	case 7: | 
 | 8220 | 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0); | 
 | 8221 | 	case 6: | 
 | 8222 | 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0); | 
 | 8223 | 	case 5: | 
 | 8224 | 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0); | 
 | 8225 | 	case 4: | 
 | 8226 | 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */ | 
 | 8227 | 	case 3: | 
 | 8228 | 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */ | 
 | 8229 | 	case 2: | 
 | 8230 | 	case 1: | 
 | 8231 |  | 
 | 8232 | 	default: | 
 | 8233 | 		break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 8234 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8235 |  | 
| Matt Carlson | 9ce768e | 2007-10-11 19:49:11 -0700 | [diff] [blame] | 8236 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 
 | 8237 | 		/* Write our heartbeat update interval to APE. */ | 
 | 8238 | 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, | 
 | 8239 | 				APE_HOST_HEARTBEAT_INT_DISABLE); | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 8240 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8241 | 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT); | 
 | 8242 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8243 | 	return 0; | 
 | 8244 | } | 
 | 8245 |  | 
 | 8246 | /* Called at device open time to get the chip ready for | 
 | 8247 |  * packet processing.  Invoked with tp->lock held. | 
 | 8248 |  */ | 
| Gary Zambrano | 8e7a22e | 2006-04-29 18:59:13 -0700 | [diff] [blame] | 8249 | static int tg3_init_hw(struct tg3 *tp, int reset_phy) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8250 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8251 | 	tg3_switch_clocks(tp); | 
 | 8252 |  | 
 | 8253 | 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 
 | 8254 |  | 
| Matt Carlson | 2f751b6 | 2008-08-04 23:17:34 -0700 | [diff] [blame] | 8255 | 	return tg3_reset_hw(tp, reset_phy); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8256 | } | 
 | 8257 |  | 
 | 8258 | #define TG3_STAT_ADD32(PSTAT, REG) \ | 
 | 8259 | do {	u32 __val = tr32(REG); \ | 
 | 8260 | 	(PSTAT)->low += __val; \ | 
 | 8261 | 	if ((PSTAT)->low < __val) \ | 
 | 8262 | 		(PSTAT)->high += 1; \ | 
 | 8263 | } while (0) | 
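/* Illustrative sketch (not part of the driver): TG3_STAT_ADD32 folds a
 * free-running 32-bit hardware counter into a 64-bit low/high pair, and
 * "(PSTAT)->low < __val" after the addition is the unsigned wrap-around
 * test that tells it when to carry into the high word.  A stand-alone,
 * user-space demo of that idiom:
 */
#if 0	/* documentation aid only, never compiled into the driver */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t low = 0xfffffff0u, high = 0;
	uint32_t reg_val = 0x20;	/* pretend the register read returned this */

	low += reg_val;
	if (low < reg_val)		/* sum wrapped past 2^32 */
		high += 1;

	/* Prints 0x0000000100000010 */
	printf("64-bit total = 0x%08x%08x\n", high, low);
	return 0;
}
#endif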
 | 8264 |  | 
 | 8265 | static void tg3_periodic_fetch_stats(struct tg3 *tp) | 
 | 8266 | { | 
 | 8267 | 	struct tg3_hw_stats *sp = tp->hw_stats; | 
 | 8268 |  | 
 | 8269 | 	if (!netif_carrier_ok(tp->dev)) | 
 | 8270 | 		return; | 
 | 8271 |  | 
 | 8272 | 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); | 
 | 8273 | 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); | 
 | 8274 | 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); | 
 | 8275 | 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); | 
 | 8276 | 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); | 
 | 8277 | 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); | 
 | 8278 | 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); | 
 | 8279 | 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); | 
 | 8280 | 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); | 
 | 8281 | 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); | 
 | 8282 | 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); | 
 | 8283 | 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); | 
 | 8284 | 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); | 
 | 8285 |  | 
 | 8286 | 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); | 
 | 8287 | 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); | 
 | 8288 | 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); | 
 | 8289 | 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); | 
 | 8290 | 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); | 
 | 8291 | 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); | 
 | 8292 | 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); | 
 | 8293 | 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); | 
 | 8294 | 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); | 
 | 8295 | 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); | 
 | 8296 | 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); | 
 | 8297 | 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); | 
 | 8298 | 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); | 
 | 8299 | 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); | 
| Michael Chan | 463d305 | 2006-05-22 16:36:27 -0700 | [diff] [blame] | 8300 |  | 
 | 8301 | 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); | 
 | 8302 | 	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); | 
 | 8303 | 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8304 | } | 
 | 8305 |  | 
 | 8306 | static void tg3_timer(unsigned long __opaque) | 
 | 8307 | { | 
 | 8308 | 	struct tg3 *tp = (struct tg3 *) __opaque; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8309 |  | 
| Michael Chan | f475f16 | 2006-03-27 23:20:14 -0800 | [diff] [blame] | 8310 | 	if (tp->irq_sync) | 
 | 8311 | 		goto restart_timer; | 
 | 8312 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8313 | 	spin_lock(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8314 |  | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8315 | 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { | 
 | 8316 | 		/* All of this garbage is because, when using non-tagged | 
 | 8317 | 		 * IRQ status, the mailbox/status_block protocol the chip | 
 | 8318 | 		 * uses with the CPU is race prone. | 
 | 8319 | 		 */ | 
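		/* Recovery, as implemented by the two branches below: if the
		 * status block already claims an update is pending, poke
		 * GRC_LCLCTRL_SETINT so the interrupt fires again; otherwise
		 * kick the host coalescing engine with HOSTCC_MODE_NOW to
		 * force a fresh status block update.
		 */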
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 8320 | 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8321 | 			tw32(GRC_LOCAL_CTRL, | 
 | 8322 | 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); | 
 | 8323 | 		} else { | 
 | 8324 | 			tw32(HOSTCC_MODE, tp->coalesce_mode | | 
| Matt Carlson | fd2ce37 | 2009-09-01 12:51:13 +0000 | [diff] [blame] | 8325 | 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8326 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8327 |  | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8328 | 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | 
 | 8329 | 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8330 | 			spin_unlock(&tp->lock); | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8331 | 			schedule_work(&tp->reset_task); | 
 | 8332 | 			return; | 
 | 8333 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8334 | 	} | 
 | 8335 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8336 | 	/* This part only runs once per second. */ | 
 | 8337 | 	if (!--tp->timer_counter) { | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8338 | 		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 
 | 8339 | 			tg3_periodic_fetch_stats(tp); | 
 | 8340 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8341 | 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { | 
 | 8342 | 			u32 mac_stat; | 
 | 8343 | 			int phy_event; | 
 | 8344 |  | 
 | 8345 | 			mac_stat = tr32(MAC_STATUS); | 
 | 8346 |  | 
 | 8347 | 			phy_event = 0; | 
 | 8348 | 			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) { | 
 | 8349 | 				if (mac_stat & MAC_STATUS_MI_INTERRUPT) | 
 | 8350 | 					phy_event = 1; | 
 | 8351 | 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) | 
 | 8352 | 				phy_event = 1; | 
 | 8353 |  | 
 | 8354 | 			if (phy_event) | 
 | 8355 | 				tg3_setup_phy(tp, 0); | 
 | 8356 | 		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) { | 
 | 8357 | 			u32 mac_stat = tr32(MAC_STATUS); | 
 | 8358 | 			int need_setup = 0; | 
 | 8359 |  | 
 | 8360 | 			if (netif_carrier_ok(tp->dev) && | 
 | 8361 | 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { | 
 | 8362 | 				need_setup = 1; | 
 | 8363 | 			} | 
 | 8364 | 			if (!netif_carrier_ok(tp->dev) && | 
 | 8365 | 			    (mac_stat & (MAC_STATUS_PCS_SYNCED | | 
 | 8366 | 					 MAC_STATUS_SIGNAL_DET))) { | 
 | 8367 | 				need_setup = 1; | 
 | 8368 | 			} | 
 | 8369 | 			if (need_setup) { | 
| Michael Chan | 3d3ebe7 | 2006-09-27 15:59:15 -0700 | [diff] [blame] | 8370 | 				if (!tp->serdes_counter) { | 
 | 8371 | 					tw32_f(MAC_MODE, | 
 | 8372 | 					     (tp->mac_mode & | 
 | 8373 | 					      ~MAC_MODE_PORT_MODE_MASK)); | 
 | 8374 | 					udelay(40); | 
 | 8375 | 					tw32_f(MAC_MODE, tp->mac_mode); | 
 | 8376 | 					udelay(40); | 
 | 8377 | 				} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8378 | 				tg3_setup_phy(tp, 0); | 
 | 8379 | 			} | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 8380 | 		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 
 | 8381 | 			tg3_serdes_parallel_detect(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8382 |  | 
 | 8383 | 		tp->timer_counter = tp->timer_multiplier; | 
 | 8384 | 	} | 
 | 8385 |  | 
| Michael Chan | 130b8e4 | 2006-09-27 16:00:40 -0700 | [diff] [blame] | 8386 | 	/* Heartbeat is only sent once every 2 seconds. | 
 | 8387 | 	 * | 
 | 8388 | 	 * The heartbeat is to tell the ASF firmware that the host | 
 | 8389 | 	 * driver is still alive.  In the event that the OS crashes, | 
 | 8390 | 	 * ASF needs to reset the hardware to free up the FIFO space | 
 | 8391 | 	 * that may be filled with rx packets destined for the host. | 
 | 8392 | 	 * If the FIFO is full, ASF will no longer function properly. | 
 | 8393 | 	 * | 
 | 8394 | 	 * Unintended resets have been reported on real time kernels | 
 | 8395 | 	 * where the timer doesn't run on time.  Netpoll will also have | 
 | 8396 | 	 * the same problem. | 
 | 8397 | 	 * | 
 | 8398 | 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware | 
 | 8399 | 	 * to check the ring condition when the heartbeat is expiring | 
 | 8400 | 	 * before doing the reset.  This will prevent most unintended | 
 | 8401 | 	 * resets. | 
 | 8402 | 	 */ | 
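	/* Worked example (illustrative): with tagged status the timer fires
	 * every HZ jiffies, so timer_multiplier is 1 and asf_multiplier is 2,
	 * i.e. one heartbeat every two 1-second ticks.  Without tagged status
	 * the timer fires every HZ/10 jiffies and the multipliers become 10
	 * and 20, which works out to the same 1-second stats interval and
	 * 2-second heartbeat.
	 */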
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8403 | 	if (!--tp->asf_counter) { | 
| Matt Carlson | bc7959b | 2008-08-15 14:08:55 -0700 | [diff] [blame] | 8404 | 		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | 
 | 8405 | 		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | 
| Matt Carlson | 7c5026a | 2008-05-02 16:49:29 -0700 | [diff] [blame] | 8406 | 			tg3_wait_for_event_ack(tp); | 
 | 8407 |  | 
| Michael Chan | bbadf50 | 2006-04-06 21:46:34 -0700 | [diff] [blame] | 8408 | 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, | 
| Michael Chan | 130b8e4 | 2006-09-27 16:00:40 -0700 | [diff] [blame] | 8409 | 				      FWCMD_NICDRV_ALIVE3); | 
| Michael Chan | bbadf50 | 2006-04-06 21:46:34 -0700 | [diff] [blame] | 8410 | 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); | 
| Michael Chan | 28fbef7 | 2005-10-26 15:48:35 -0700 | [diff] [blame] | 8411 | 			/* 5 seconds timeout */ | 
| Michael Chan | bbadf50 | 2006-04-06 21:46:34 -0700 | [diff] [blame] | 8412 | 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); | 
| Matt Carlson | 4ba526c | 2008-08-15 14:10:04 -0700 | [diff] [blame] | 8413 |  | 
 | 8414 | 			tg3_generate_fw_event(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8415 | 		} | 
 | 8416 | 		tp->asf_counter = tp->asf_multiplier; | 
 | 8417 | 	} | 
 | 8418 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8419 | 	spin_unlock(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8420 |  | 
| Michael Chan | f475f16 | 2006-03-27 23:20:14 -0800 | [diff] [blame] | 8421 | restart_timer: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8422 | 	tp->timer.expires = jiffies + tp->timer_offset; | 
 | 8423 | 	add_timer(&tp->timer); | 
 | 8424 | } | 
 | 8425 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8426 | static int tg3_request_irq(struct tg3 *tp, int irq_num) | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8427 | { | 
| David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 8428 | 	irq_handler_t fn; | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8429 | 	unsigned long flags; | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8430 | 	char *name; | 
 | 8431 | 	struct tg3_napi *tnapi = &tp->napi[irq_num]; | 
 | 8432 |  | 
 | 8433 | 	if (tp->irq_cnt == 1) | 
 | 8434 | 		name = tp->dev->name; | 
 | 8435 | 	else { | 
 | 8436 | 		name = &tnapi->irq_lbl[0]; | 
 | 8437 | 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); | 
 | 8438 | 		name[IFNAMSIZ-1] = 0; | 
 | 8439 | 	} | 
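	/* Example (illustrative): with four vectors on a device named "eth0"
	 * the labels built above are "eth0-0" through "eth0-3"; with a single
	 * vector the plain device name is used instead.
	 */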
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8440 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8441 | 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8442 | 		fn = tg3_msi; | 
 | 8443 | 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | 
 | 8444 | 			fn = tg3_msi_1shot; | 
| Thomas Gleixner | 1fb9df5 | 2006-07-01 19:29:39 -0700 | [diff] [blame] | 8445 | 		flags = IRQF_SAMPLE_RANDOM; | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8446 | 	} else { | 
 | 8447 | 		fn = tg3_interrupt; | 
 | 8448 | 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | 
 | 8449 | 			fn = tg3_interrupt_tagged; | 
| Thomas Gleixner | 1fb9df5 | 2006-07-01 19:29:39 -0700 | [diff] [blame] | 8450 | 		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM; | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8451 | 	} | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8452 |  | 
 | 8453 | 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8454 | } | 
 | 8455 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8456 | static int tg3_test_interrupt(struct tg3 *tp) | 
 | 8457 | { | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 8458 | 	struct tg3_napi *tnapi = &tp->napi[0]; | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8459 | 	struct net_device *dev = tp->dev; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 8460 | 	int err, i, intr_ok = 0; | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8461 | 	u32 val; | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8462 |  | 
| Michael Chan | d4bc392 | 2005-05-29 14:59:20 -0700 | [diff] [blame] | 8463 | 	if (!netif_running(dev)) | 
 | 8464 | 		return -ENODEV; | 
 | 8465 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8466 | 	tg3_disable_ints(tp); | 
 | 8467 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8468 | 	free_irq(tnapi->irq_vec, tnapi); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8469 |  | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8470 | 	/* | 
 | 8471 | 	 * Turn off MSI one shot mode.  Otherwise this test has no | 
 | 8472 | 	 * observable way to know whether the interrupt was delivered. | 
 | 8473 | 	 */ | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 8474 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 8475 | 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8476 | 	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | 
 | 8477 | 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; | 
 | 8478 | 		tw32(MSGINT_MODE, val); | 
 | 8479 | 	} | 
 | 8480 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8481 | 	err = request_irq(tnapi->irq_vec, tg3_test_isr, | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 8482 | 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8483 | 	if (err) | 
 | 8484 | 		return err; | 
 | 8485 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 8486 | 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED; | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8487 | 	tg3_enable_ints(tp); | 
 | 8488 |  | 
 | 8489 | 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 
| Matt Carlson | fd2ce37 | 2009-09-01 12:51:13 +0000 | [diff] [blame] | 8490 | 	       tnapi->coal_now); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8491 |  | 
 | 8492 | 	for (i = 0; i < 5; i++) { | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 8493 | 		u32 int_mbox, misc_host_ctrl; | 
 | 8494 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 8495 | 		int_mbox = tr32_mailbox(tnapi->int_mbox); | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 8496 | 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); | 
 | 8497 |  | 
 | 8498 | 		if ((int_mbox != 0) || | 
 | 8499 | 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { | 
 | 8500 | 			intr_ok = 1; | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8501 | 			break; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 8502 | 		} | 
 | 8503 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8504 | 		msleep(10); | 
 | 8505 | 	} | 
 | 8506 |  | 
 | 8507 | 	tg3_disable_ints(tp); | 
 | 8508 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8509 | 	free_irq(tnapi->irq_vec, tnapi); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 8510 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8511 | 	err = tg3_request_irq(tp, 0); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8512 |  | 
 | 8513 | 	if (err) | 
 | 8514 | 		return err; | 
 | 8515 |  | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8516 | 	if (intr_ok) { | 
 | 8517 | 		/* Reenable MSI one shot mode. */ | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 8518 | 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 8519 | 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8520 | 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | 
 | 8521 | 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; | 
 | 8522 | 			tw32(MSGINT_MODE, val); | 
 | 8523 | 		} | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8524 | 		return 0; | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8525 | 	} | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8526 |  | 
 | 8527 | 	return -EIO; | 
 | 8528 | } | 
 | 8529 |  | 
 | 8530 | /* Returns 0 if the MSI test succeeds, or if the MSI test fails but | 
 | 8531 |  * INTx mode is successfully restored. | 
 | 8532 |  */ | 
 | 8533 | static int tg3_test_msi(struct tg3 *tp) | 
 | 8534 | { | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8535 | 	int err; | 
 | 8536 | 	u16 pci_cmd; | 
 | 8537 |  | 
 | 8538 | 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI)) | 
 | 8539 | 		return 0; | 
 | 8540 |  | 
 | 8541 | 	/* Turn off SERR reporting in case MSI terminates with Master | 
 | 8542 | 	 * Abort. | 
 | 8543 | 	 */ | 
 | 8544 | 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 
 | 8545 | 	pci_write_config_word(tp->pdev, PCI_COMMAND, | 
 | 8546 | 			      pci_cmd & ~PCI_COMMAND_SERR); | 
 | 8547 |  | 
 | 8548 | 	err = tg3_test_interrupt(tp); | 
 | 8549 |  | 
 | 8550 | 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 
 | 8551 |  | 
 | 8552 | 	if (!err) | 
 | 8553 | 		return 0; | 
 | 8554 |  | 
 | 8555 | 	/* other failures */ | 
 | 8556 | 	if (err != -EIO) | 
 | 8557 | 		return err; | 
 | 8558 |  | 
 | 8559 | 	/* MSI test failed, go back to INTx mode */ | 
 | 8560 | 	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " | 
 | 8561 | 	       "switching to INTx mode. Please report this failure to " | 
 | 8562 | 	       "the PCI maintainer and include system chipset information.\n", | 
 | 8563 | 		       tp->dev->name); | 
 | 8564 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8565 | 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]); | 
| Matt Carlson | 09943a1 | 2009-08-28 14:01:57 +0000 | [diff] [blame] | 8566 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8567 | 	pci_disable_msi(tp->pdev); | 
 | 8568 |  | 
 | 8569 | 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 
 | 8570 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8571 | 	err = tg3_request_irq(tp, 0); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8572 | 	if (err) | 
 | 8573 | 		return err; | 
 | 8574 |  | 
 | 8575 | 	/* Need to reset the chip because the MSI cycle may have terminated | 
 | 8576 | 	 * with Master Abort. | 
 | 8577 | 	 */ | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8578 | 	tg3_full_lock(tp, 1); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8579 |  | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 8580 | 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Gary Zambrano | 8e7a22e | 2006-04-29 18:59:13 -0700 | [diff] [blame] | 8581 | 	err = tg3_init_hw(tp, 1); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8582 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8583 | 	tg3_full_unlock(tp); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8584 |  | 
 | 8585 | 	if (err) | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8586 | 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8587 |  | 
 | 8588 | 	return err; | 
 | 8589 | } | 
 | 8590 |  | 
| Matt Carlson | 9e9fd12 | 2009-01-19 16:57:45 -0800 | [diff] [blame] | 8591 | static int tg3_request_firmware(struct tg3 *tp) | 
 | 8592 | { | 
 | 8593 | 	const __be32 *fw_data; | 
 | 8594 |  | 
 | 8595 | 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { | 
 | 8596 | 		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", | 
 | 8597 | 		       tp->dev->name, tp->fw_needed); | 
 | 8598 | 		return -ENOENT; | 
 | 8599 | 	} | 
 | 8600 |  | 
 | 8601 | 	fw_data = (void *)tp->fw->data; | 
 | 8602 |  | 
 | 8603 | 	/* Firmware blob starts with version numbers, followed by | 
 | 8604 | 	 * start address and _full_ length including BSS sections | 
 | 8605 | 	 * (which must be longer than the actual data, of course). | 
 | 8606 | 	 */ | 
 | 8607 |  | 
 | 8608 | 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */ | 
 | 8609 | 	if (tp->fw_len < (tp->fw->size - 12)) { | 
 | 8610 | 		printk(KERN_ERR "%s: bogus length %d in \"%s\"\n", | 
 | 8611 | 		       tp->dev->name, tp->fw_len, tp->fw_needed); | 
 | 8612 | 		release_firmware(tp->fw); | 
 | 8613 | 		tp->fw = NULL; | 
 | 8614 | 		return -EINVAL; | 
 | 8615 | 	} | 
 | 8616 |  | 
 | 8617 | 	/* We no longer need firmware; we have it. */ | 
 | 8618 | 	tp->fw_needed = NULL; | 
 | 8619 | 	return 0; | 
 | 8620 | } | 
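/* Illustrative sketch (not a structure the driver declares): the header
 * layout implied by tg3_request_firmware() above is three big-endian
 * 32-bit words in front of the image, which is why fw_data[2] is read as
 * the length and the sanity check subtracts 12 bytes from tp->fw->size.
 * The field names below are made up for illustration.
 */
#if 0	/* documentation aid only, never compiled into the driver */
struct tg3_fw_hdr_sketch {
	__be32 version;		/* firmware version word(s)               */
	__be32 base_addr;	/* load/start address in NIC memory       */
	__be32 len;		/* full image length, including BSS       */
	/* firmware text/data follows immediately after this header */
};
#endif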
 | 8621 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8622 | static bool tg3_enable_msix(struct tg3 *tp) | 
 | 8623 | { | 
 | 8624 | 	int i, rc, cpus = num_online_cpus(); | 
 | 8625 | 	struct msix_entry msix_ent[tp->irq_max]; | 
 | 8626 |  | 
 | 8627 | 	if (cpus == 1) | 
 | 8628 | 		/* Just fall back to the simpler MSI mode. */ | 
 | 8629 | 		return false; | 
 | 8630 |  | 
 | 8631 | 	/* | 
 | 8632 | 	 * We want as many rx rings enabled as there are cpus. | 
 | 8633 | 	 * The first MSIX vector only deals with link interrupts, etc, | 
 | 8634 | 	 * so we add one to the number of vectors we are requesting. | 
 | 8635 | 	 */ | 
 | 8636 | 	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); | 
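	/* Worked example (illustrative): on a 4-CPU system with an irq_max of
	 * 5 this requests min(4 + 1, 5) = 5 vectors: vector 0 for link and
	 * other non-ring events, vectors 1-4 for the four rx rings.
	 */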
 | 8637 |  | 
 | 8638 | 	for (i = 0; i < tp->irq_max; i++) { | 
 | 8639 | 		msix_ent[i].entry  = i; | 
 | 8640 | 		msix_ent[i].vector = 0; | 
 | 8641 | 	} | 
 | 8642 |  | 
 | 8643 | 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); | 
 | 8644 | 	if (rc != 0) { | 
 | 8645 | 		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS) | 
 | 8646 | 			return false; | 
 | 8647 | 		if (pci_enable_msix(tp->pdev, msix_ent, rc)) | 
 | 8648 | 			return false; | 
 | 8649 | 		printk(KERN_NOTICE | 
 | 8650 | 		       "%s: Requested %d MSI-X vectors, received %d\n", | 
 | 8651 | 		       tp->dev->name, tp->irq_cnt, rc); | 
 | 8652 | 		tp->irq_cnt = rc; | 
 | 8653 | 	} | 
 | 8654 |  | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8655 | 	tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; | 
 | 8656 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8657 | 	for (i = 0; i < tp->irq_max; i++) | 
 | 8658 | 		tp->napi[i].irq_vec = msix_ent[i].vector; | 
 | 8659 |  | 
| Matt Carlson | 19cfaec | 2009-12-03 08:36:20 +0000 | [diff] [blame] | 8660 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 
 | 8661 | 		tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; | 
 | 8662 | 		tp->dev->real_num_tx_queues = tp->irq_cnt - 1; | 
 | 8663 | 	} else | 
 | 8664 | 		tp->dev->real_num_tx_queues = 1; | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 8665 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8666 | 	return true; | 
 | 8667 | } | 
 | 8668 |  | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8669 | static void tg3_ints_init(struct tg3 *tp) | 
 | 8670 | { | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8671 | 	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) && | 
 | 8672 | 	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) { | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8673 | 		/* All MSI supporting chips should support tagged | 
 | 8674 | 		 * status.  Assert that this is the case. | 
 | 8675 | 		 */ | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8676 | 		printk(KERN_WARNING PFX "%s: MSI without TAGGED? " | 
 | 8677 | 		       "Not using MSI.\n", tp->dev->name); | 
 | 8678 | 		goto defcfg; | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8679 | 	} | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8680 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8681 | 	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp)) | 
 | 8682 | 		tp->tg3_flags2 |= TG3_FLG2_USING_MSIX; | 
 | 8683 | 	else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) && | 
 | 8684 | 		 pci_enable_msi(tp->pdev) == 0) | 
 | 8685 | 		tp->tg3_flags2 |= TG3_FLG2_USING_MSI; | 
 | 8686 |  | 
 | 8687 | 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { | 
 | 8688 | 		u32 msi_mode = tr32(MSGINT_MODE); | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8689 | 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 
 | 8690 | 			msi_mode |= MSGINT_MODE_MULTIVEC_EN; | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8691 | 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); | 
 | 8692 | 	} | 
 | 8693 | defcfg: | 
 | 8694 | 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | 
 | 8695 | 		tp->irq_cnt = 1; | 
 | 8696 | 		tp->napi[0].irq_vec = tp->pdev->irq; | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 8697 | 		tp->dev->real_num_tx_queues = 1; | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8698 | 	} | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8699 | } | 
 | 8700 |  | 
 | 8701 | static void tg3_ints_fini(struct tg3 *tp) | 
 | 8702 | { | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8703 | 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 
 | 8704 | 		pci_disable_msix(tp->pdev); | 
 | 8705 | 	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) | 
 | 8706 | 		pci_disable_msi(tp->pdev); | 
 | 8707 | 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX; | 
| Matt Carlson | baf8a94 | 2009-09-01 13:13:00 +0000 | [diff] [blame] | 8708 | 	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS; | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8709 | } | 
 | 8710 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8711 | static int tg3_open(struct net_device *dev) | 
 | 8712 | { | 
 | 8713 | 	struct tg3 *tp = netdev_priv(dev); | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8714 | 	int i, err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8715 |  | 
| Matt Carlson | 9e9fd12 | 2009-01-19 16:57:45 -0800 | [diff] [blame] | 8716 | 	if (tp->fw_needed) { | 
 | 8717 | 		err = tg3_request_firmware(tp); | 
 | 8718 | 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { | 
 | 8719 | 			if (err) | 
 | 8720 | 				return err; | 
 | 8721 | 		} else if (err) { | 
 | 8722 | 			printk(KERN_WARNING "%s: TSO capability disabled.\n", | 
 | 8723 | 			       tp->dev->name); | 
 | 8724 | 			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | 
 | 8725 | 		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 
 | 8726 | 			printk(KERN_NOTICE "%s: TSO capability restored.\n", | 
 | 8727 | 			       tp->dev->name); | 
 | 8728 | 			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 
 | 8729 | 		} | 
 | 8730 | 	} | 
 | 8731 |  | 
| Michael Chan | c49a156 | 2006-12-17 17:07:29 -0800 | [diff] [blame] | 8732 | 	netif_carrier_off(tp->dev); | 
 | 8733 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 8734 | 	err = tg3_set_power_state(tp, PCI_D0); | 
| Matt Carlson | 2f751b6 | 2008-08-04 23:17:34 -0700 | [diff] [blame] | 8735 | 	if (err) | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 8736 | 		return err; | 
| Matt Carlson | 2f751b6 | 2008-08-04 23:17:34 -0700 | [diff] [blame] | 8737 |  | 
 | 8738 | 	tg3_full_lock(tp, 0); | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 8739 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8740 | 	tg3_disable_ints(tp); | 
 | 8741 | 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 
 | 8742 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8743 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8744 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8745 | 	/* | 
 | 8746 | 	 * Setup interrupts first so we know how | 
 | 8747 | 	 * many NAPI resources to allocate | 
 | 8748 | 	 */ | 
 | 8749 | 	tg3_ints_init(tp); | 
 | 8750 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8751 | 	/* The placement of this call is tied | 
 | 8752 | 	 * to the setup and use of Host TX descriptors. | 
 | 8753 | 	 */ | 
 | 8754 | 	err = tg3_alloc_consistent(tp); | 
 | 8755 | 	if (err) | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8756 | 		goto err_out1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8757 |  | 
| Matt Carlson | fed9781 | 2009-09-01 13:10:19 +0000 | [diff] [blame] | 8758 | 	tg3_napi_enable(tp); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 8759 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8760 | 	for (i = 0; i < tp->irq_cnt; i++) { | 
 | 8761 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 8762 | 		err = tg3_request_irq(tp, i); | 
 | 8763 | 		if (err) { | 
 | 8764 | 			for (i--; i >= 0; i--) { | 
 |      | 				tnapi = &tp->napi[i]; | 
 | 8765 | 				free_irq(tnapi->irq_vec, tnapi); | 
 |      | 			} | 
 | 8766 | 			break; | 
 | 8767 | 		} | 
 | 8768 | 	} | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8769 |  | 
 | 8770 | 	if (err) | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8771 | 		goto err_out2; | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8772 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8773 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8774 |  | 
| Gary Zambrano | 8e7a22e | 2006-04-29 18:59:13 -0700 | [diff] [blame] | 8775 | 	err = tg3_init_hw(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8776 | 	if (err) { | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 8777 | 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8778 | 		tg3_free_rings(tp); | 
 | 8779 | 	} else { | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8780 | 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | 
 | 8781 | 			tp->timer_offset = HZ; | 
 | 8782 | 		else | 
 | 8783 | 			tp->timer_offset = HZ / 10; | 
 | 8784 |  | 
 | 8785 | 		BUG_ON(tp->timer_offset > HZ); | 
 | 8786 | 		tp->timer_counter = tp->timer_multiplier = | 
 | 8787 | 			(HZ / tp->timer_offset); | 
 | 8788 | 		tp->asf_counter = tp->asf_multiplier = | 
| Michael Chan | 28fbef7 | 2005-10-26 15:48:35 -0700 | [diff] [blame] | 8789 | 			((HZ / tp->timer_offset) * 2); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8790 |  | 
 | 8791 | 		init_timer(&tp->timer); | 
 | 8792 | 		tp->timer.expires = jiffies + tp->timer_offset; | 
 | 8793 | 		tp->timer.data = (unsigned long) tp; | 
 | 8794 | 		tp->timer.function = tg3_timer; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8795 | 	} | 
 | 8796 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8797 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8798 |  | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8799 | 	if (err) | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8800 | 		goto err_out3; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8801 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8802 | 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 
 | 8803 | 		err = tg3_test_msi(tp); | 
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 8804 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8805 | 		if (err) { | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8806 | 			tg3_full_lock(tp, 0); | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 8807 | 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8808 | 			tg3_free_rings(tp); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8809 | 			tg3_full_unlock(tp); | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8810 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8811 | 			goto err_out2; | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8812 | 		} | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8813 |  | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8814 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 8815 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8816 | 		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && | 
 | 8817 | 		    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { | 
 | 8818 | 			u32 val = tr32(PCIE_TRANSACTION_CFG); | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8819 |  | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 8820 | 			tw32(PCIE_TRANSACTION_CFG, | 
 | 8821 | 			     val | PCIE_TRANS_CFG_1SHOT_MSI); | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 8822 | 		} | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8823 | 	} | 
 | 8824 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 8825 | 	tg3_phy_start(tp); | 
 | 8826 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8827 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8828 |  | 
| Michael Chan | 7938109 | 2005-04-21 17:13:59 -0700 | [diff] [blame] | 8829 | 	add_timer(&tp->timer); | 
 | 8830 | 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8831 | 	tg3_enable_ints(tp); | 
 | 8832 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 8833 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8834 |  | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 8835 | 	netif_tx_start_all_queues(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8836 |  | 
 | 8837 | 	return 0; | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8838 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8839 | err_out3: | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 8840 | 	for (i = tp->irq_cnt - 1; i >= 0; i--) { | 
 | 8841 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 8842 | 		free_irq(tnapi->irq_vec, tnapi); | 
 | 8843 | 	} | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8844 |  | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8845 | err_out2: | 
| Matt Carlson | fed9781 | 2009-09-01 13:10:19 +0000 | [diff] [blame] | 8846 | 	tg3_napi_disable(tp); | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8847 | 	tg3_free_consistent(tp); | 
| Matt Carlson | 679563f | 2009-09-01 12:55:46 +0000 | [diff] [blame] | 8848 |  | 
 | 8849 | err_out1: | 
 | 8850 | 	tg3_ints_fini(tp); | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 8851 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8852 | } | 
 | 8853 |  | 
 | 8854 | #if 0 | 
 | 8855 | /*static*/ void tg3_dump_state(struct tg3 *tp) | 
 | 8856 | { | 
 | 8857 | 	u32 val32, val32_2, val32_3, val32_4, val32_5; | 
 | 8858 | 	u16 val16; | 
 | 8859 | 	int i; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 8860 | 	struct tg3_hw_status *sblk = tp->napi[0].hw_status; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8861 |  | 
 | 8862 | 	pci_read_config_word(tp->pdev, PCI_STATUS, &val16); | 
 | 8863 | 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32); | 
 | 8864 | 	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n", | 
 | 8865 | 	       val16, val32); | 
 | 8866 |  | 
 | 8867 | 	/* MAC block */ | 
 | 8868 | 	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n", | 
 | 8869 | 	       tr32(MAC_MODE), tr32(MAC_STATUS)); | 
 | 8870 | 	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n", | 
 | 8871 | 	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL)); | 
 | 8872 | 	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n", | 
 | 8873 | 	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS)); | 
 | 8874 | 	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n", | 
 | 8875 | 	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS)); | 
 | 8876 |  | 
 | 8877 | 	/* Send data initiator control block */ | 
 | 8878 | 	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n", | 
 | 8879 | 	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS)); | 
 | 8880 | 	printk("       SNDDATAI_STATSCTRL[%08x]\n", | 
 | 8881 | 	       tr32(SNDDATAI_STATSCTRL)); | 
 | 8882 |  | 
 | 8883 | 	/* Send data completion control block */ | 
 | 8884 | 	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE)); | 
 | 8885 |  | 
 | 8886 | 	/* Send BD ring selector block */ | 
 | 8887 | 	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n", | 
 | 8888 | 	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS)); | 
 | 8889 |  | 
 | 8890 | 	/* Send BD initiator control block */ | 
 | 8891 | 	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n", | 
 | 8892 | 	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS)); | 
 | 8893 |  | 
 | 8894 | 	/* Send BD completion control block */ | 
 | 8895 | 	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE)); | 
 | 8896 |  | 
 | 8897 | 	/* Receive list placement control block */ | 
 | 8898 | 	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n", | 
 | 8899 | 	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS)); | 
 | 8900 | 	printk("       RCVLPC_STATSCTRL[%08x]\n", | 
 | 8901 | 	       tr32(RCVLPC_STATSCTRL)); | 
 | 8902 |  | 
 | 8903 | 	/* Receive data and receive BD initiator control block */ | 
 | 8904 | 	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n", | 
 | 8905 | 	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS)); | 
 | 8906 |  | 
 | 8907 | 	/* Receive data completion control block */ | 
 | 8908 | 	printk("DEBUG: RCVDCC_MODE[%08x]\n", | 
 | 8909 | 	       tr32(RCVDCC_MODE)); | 
 | 8910 |  | 
 | 8911 | 	/* Receive BD initiator control block */ | 
 | 8912 | 	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n", | 
 | 8913 | 	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS)); | 
 | 8914 |  | 
 | 8915 | 	/* Receive BD completion control block */ | 
 | 8916 | 	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n", | 
 | 8917 | 	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS)); | 
 | 8918 |  | 
 | 8919 | 	/* Receive list selector control block */ | 
 | 8920 | 	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n", | 
 | 8921 | 	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS)); | 
 | 8922 |  | 
 | 8923 | 	/* Mbuf cluster free block */ | 
 | 8924 | 	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n", | 
 | 8925 | 	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS)); | 
 | 8926 |  | 
 | 8927 | 	/* Host coalescing control block */ | 
 | 8928 | 	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n", | 
 | 8929 | 	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS)); | 
 | 8930 | 	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n", | 
 | 8931 | 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), | 
 | 8932 | 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); | 
 | 8933 | 	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n", | 
 | 8934 | 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH), | 
 | 8935 | 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW)); | 
 | 8936 | 	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n", | 
 | 8937 | 	       tr32(HOSTCC_STATS_BLK_NIC_ADDR)); | 
 | 8938 | 	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n", | 
 | 8939 | 	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR)); | 
 | 8940 |  | 
 | 8941 | 	/* Memory arbiter control block */ | 
 | 8942 | 	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n", | 
 | 8943 | 	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS)); | 
 | 8944 |  | 
 | 8945 | 	/* Buffer manager control block */ | 
 | 8946 | 	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n", | 
 | 8947 | 	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS)); | 
 | 8948 | 	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n", | 
 | 8949 | 	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE)); | 
 | 8950 | 	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] " | 
 | 8951 | 	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n", | 
 | 8952 | 	       tr32(BUFMGR_DMA_DESC_POOL_ADDR), | 
 | 8953 | 	       tr32(BUFMGR_DMA_DESC_POOL_SIZE)); | 
 | 8954 |  | 
 | 8955 | 	/* Read DMA control block */ | 
 | 8956 | 	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n", | 
 | 8957 | 	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS)); | 
 | 8958 |  | 
 | 8959 | 	/* Write DMA control block */ | 
 | 8960 | 	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n", | 
 | 8961 | 	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS)); | 
 | 8962 |  | 
 | 8963 | 	/* DMA completion block */ | 
 | 8964 | 	printk("DEBUG: DMAC_MODE[%08x]\n", | 
 | 8965 | 	       tr32(DMAC_MODE)); | 
 | 8966 |  | 
 | 8967 | 	/* GRC block */ | 
 | 8968 | 	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n", | 
 | 8969 | 	       tr32(GRC_MODE), tr32(GRC_MISC_CFG)); | 
 | 8970 | 	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n", | 
 | 8971 | 	       tr32(GRC_LOCAL_CTRL)); | 
 | 8972 |  | 
 | 8973 | 	/* TG3_BDINFOs */ | 
 | 8974 | 	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n", | 
 | 8975 | 	       tr32(RCVDBDI_JUMBO_BD + 0x0), | 
 | 8976 | 	       tr32(RCVDBDI_JUMBO_BD + 0x4), | 
 | 8977 | 	       tr32(RCVDBDI_JUMBO_BD + 0x8), | 
 | 8978 | 	       tr32(RCVDBDI_JUMBO_BD + 0xc)); | 
 | 8979 | 	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n", | 
 | 8980 | 	       tr32(RCVDBDI_STD_BD + 0x0), | 
 | 8981 | 	       tr32(RCVDBDI_STD_BD + 0x4), | 
 | 8982 | 	       tr32(RCVDBDI_STD_BD + 0x8), | 
 | 8983 | 	       tr32(RCVDBDI_STD_BD + 0xc)); | 
 | 8984 | 	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n", | 
 | 8985 | 	       tr32(RCVDBDI_MINI_BD + 0x0), | 
 | 8986 | 	       tr32(RCVDBDI_MINI_BD + 0x4), | 
 | 8987 | 	       tr32(RCVDBDI_MINI_BD + 0x8), | 
 | 8988 | 	       tr32(RCVDBDI_MINI_BD + 0xc)); | 
 | 8989 |  | 
 | 8990 | 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32); | 
 | 8991 | 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2); | 
 | 8992 | 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3); | 
 | 8993 | 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4); | 
 | 8994 | 	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n", | 
 | 8995 | 	       val32, val32_2, val32_3, val32_4); | 
 | 8996 |  | 
 | 8997 | 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32); | 
 | 8998 | 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2); | 
 | 8999 | 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3); | 
 | 9000 | 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4); | 
 | 9001 | 	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n", | 
 | 9002 | 	       val32, val32_2, val32_3, val32_4); | 
 | 9003 |  | 
 | 9004 | 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32); | 
 | 9005 | 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2); | 
 | 9006 | 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3); | 
 | 9007 | 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4); | 
 | 9008 | 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5); | 
 | 9009 | 	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n", | 
 | 9010 | 	       val32, val32_2, val32_3, val32_4, val32_5); | 
 | 9011 |  | 
 | 9012 | 	/* SW status block */ | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 9013 | 	printk(KERN_DEBUG | 
 | 9014 | 	 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", | 
 | 9015 | 	       sblk->status, | 
 | 9016 | 	       sblk->status_tag, | 
 | 9017 | 	       sblk->rx_jumbo_consumer, | 
 | 9018 | 	       sblk->rx_consumer, | 
 | 9019 | 	       sblk->rx_mini_consumer, | 
 | 9020 | 	       sblk->idx[0].rx_producer, | 
 | 9021 | 	       sblk->idx[0].tx_consumer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9022 |  | 
 | 9023 | 	/* SW statistics block */ | 
 | 9024 | 	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n", | 
 | 9025 | 	       ((u32 *)tp->hw_stats)[0], | 
 | 9026 | 	       ((u32 *)tp->hw_stats)[1], | 
 | 9027 | 	       ((u32 *)tp->hw_stats)[2], | 
 | 9028 | 	       ((u32 *)tp->hw_stats)[3]); | 
 | 9029 |  | 
 | 9030 | 	/* Mailboxes */ | 
 | 9031 | 	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n", | 
| Michael Chan | 09ee929 | 2005-08-09 20:17:00 -0700 | [diff] [blame] | 9032 | 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0), | 
 | 9033 | 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4), | 
 | 9034 | 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0), | 
 | 9035 | 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9036 |  | 
 | 9037 | 	/* NIC side send descriptors. */ | 
 | 9038 | 	for (i = 0; i < 6; i++) { | 
 | 9039 | 		unsigned long txd; | 
 | 9040 |  | 
 | 9041 | 		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC | 
 | 9042 | 			+ (i * sizeof(struct tg3_tx_buffer_desc)); | 
 | 9043 | 		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n", | 
 | 9044 | 		       i, | 
 | 9045 | 		       readl(txd + 0x0), readl(txd + 0x4), | 
 | 9046 | 		       readl(txd + 0x8), readl(txd + 0xc)); | 
 | 9047 | 	} | 
 | 9048 |  | 
 | 9049 | 	/* NIC side RX descriptors. */ | 
 | 9050 | 	for (i = 0; i < 6; i++) { | 
 | 9051 | 		unsigned long rxd; | 
 | 9052 |  | 
 | 9053 | 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC | 
 | 9054 | 			+ (i * sizeof(struct tg3_rx_buffer_desc)); | 
 | 9055 | 		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n", | 
 | 9056 | 		       i, | 
 | 9057 | 		       readl(rxd + 0x0), readl(rxd + 0x4), | 
 | 9058 | 		       readl(rxd + 0x8), readl(rxd + 0xc)); | 
 | 9059 | 		rxd += (4 * sizeof(u32)); | 
 | 9060 | 		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n", | 
 | 9061 | 		       i, | 
 | 9062 | 		       readl(rxd + 0x0), readl(rxd + 0x4), | 
 | 9063 | 		       readl(rxd + 0x8), readl(rxd + 0xc)); | 
 | 9064 | 	} | 
 | 9065 |  | 
 | 9066 | 	for (i = 0; i < 6; i++) { | 
 | 9067 | 		unsigned long rxd; | 
 | 9068 |  | 
 | 9069 | 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC | 
 | 9070 | 			+ (i * sizeof(struct tg3_rx_buffer_desc)); | 
 | 9071 | 		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n", | 
 | 9072 | 		       i, | 
 | 9073 | 		       readl(rxd + 0x0), readl(rxd + 0x4), | 
 | 9074 | 		       readl(rxd + 0x8), readl(rxd + 0xc)); | 
 | 9075 | 		rxd += (4 * sizeof(u32)); | 
 | 9076 | 		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n", | 
 | 9077 | 		       i, | 
 | 9078 | 		       readl(rxd + 0x0), readl(rxd + 0x4), | 
 | 9079 | 		       readl(rxd + 0x8), readl(rxd + 0xc)); | 
 | 9080 | 	} | 
 | 9081 | } | 
 | 9082 | #endif | 
 | 9083 |  | 
 | 9084 | static struct net_device_stats *tg3_get_stats(struct net_device *); | 
 | 9085 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); | 
 | 9086 |  | 
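 |  | /* | 
 |  |  * Shut the device down: stop NAPI, the reset worker, the transmit | 
 |  |  * queues, the driver timer and the PHY, halt the chip, release the | 
 |  |  * rings, IRQ vectors and DMA memory, take one last snapshot of the | 
 |  |  * hardware statistics, and finally drop the device into D3hot. | 
 |  |  */ | 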
 | 9087 | static int tg3_close(struct net_device *dev) | 
 | 9088 | { | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 9089 | 	int i; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9090 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9091 |  | 
| Matt Carlson | fed9781 | 2009-09-01 13:10:19 +0000 | [diff] [blame] | 9092 | 	tg3_napi_disable(tp); | 
| Oleg Nesterov | 28e53bd | 2007-05-09 02:34:22 -0700 | [diff] [blame] | 9093 | 	cancel_work_sync(&tp->reset_task); | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 9094 |  | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 9095 | 	netif_tx_stop_all_queues(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9096 |  | 
 | 9097 | 	del_timer_sync(&tp->timer); | 
 | 9098 |  | 
| Matt Carlson | 24bb4fb | 2009-10-05 17:55:29 +0000 | [diff] [blame] | 9099 | 	tg3_phy_stop(tp); | 
 | 9100 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9101 | 	tg3_full_lock(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9102 | #if 0 | 
 | 9103 | 	tg3_dump_state(tp); | 
 | 9104 | #endif | 
 | 9105 |  | 
 | 9106 | 	tg3_disable_ints(tp); | 
 | 9107 |  | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 9108 | 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9109 | 	tg3_free_rings(tp); | 
| Michael Chan | 5cf64b8 | 2007-05-05 12:11:21 -0700 | [diff] [blame] | 9110 | 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9111 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9112 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9113 |  | 
| Matt Carlson | 4f125f4 | 2009-09-01 12:55:02 +0000 | [diff] [blame] | 9114 | 	for (i = tp->irq_cnt - 1; i >= 0; i--) { | 
 | 9115 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 9116 | 		free_irq(tnapi->irq_vec, tnapi); | 
 | 9117 | 	} | 
| Matt Carlson | 07b0173 | 2009-08-28 14:01:15 +0000 | [diff] [blame] | 9118 |  | 
 | 9119 | 	tg3_ints_fini(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9120 |  | 
 | 9121 | 	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev), | 
 | 9122 | 	       sizeof(tp->net_stats_prev)); | 
 | 9123 | 	memcpy(&tp->estats_prev, tg3_get_estats(tp), | 
 | 9124 | 	       sizeof(tp->estats_prev)); | 
 | 9125 |  | 
 | 9126 | 	tg3_free_consistent(tp); | 
 | 9127 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 9128 | 	tg3_set_power_state(tp, PCI_D3hot); | 
 | 9129 |  | 
 | 9130 | 	netif_carrier_off(tp->dev); | 
 | 9131 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9132 | 	return 0; | 
 | 9133 | } | 
 | 9134 |  | 
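 |  | /* | 
 |  |  * The hardware keeps 64-bit counters.  On 64-bit hosts the full value | 
 |  |  * fits in an unsigned long; on 32-bit hosts only the low word does, so | 
 |  |  * the high word is dropped here.  get_estat64() below always returns | 
 |  |  * the full 64 bits for the ethtool statistics. | 
 |  |  */ | 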
 | 9135 | static inline unsigned long get_stat64(tg3_stat64_t *val) | 
 | 9136 | { | 
 | 9137 | 	unsigned long ret; | 
 | 9138 |  | 
 | 9139 | #if (BITS_PER_LONG == 32) | 
 | 9140 | 	ret = val->low; | 
 | 9141 | #else | 
 | 9142 | 	ret = ((u64)val->high << 32) | ((u64)val->low); | 
 | 9143 | #endif | 
 | 9144 | 	return ret; | 
 | 9145 | } | 
 | 9146 |  | 
| Stefan Buehler | 816f8b8 | 2008-08-15 14:10:54 -0700 | [diff] [blame] | 9147 | static inline u64 get_estat64(tg3_stat64_t *val) | 
 | 9148 | { | 
 | 9149 | 	return ((u64)val->high << 32) | ((u64)val->low); | 
 | 9150 | } | 
 | 9151 |  | 
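 |  | /* | 
 |  |  * For 5700 / 5701 with a copper PHY the CRC error count is taken from | 
 |  |  * the PHY rather than from the MAC statistics block: MII_TG3_TEST1 | 
 |  |  * enables the PHY counter and register 0x14 returns the current count, | 
 |  |  * which appears to clear on read and is therefore accumulated in | 
 |  |  * tp->phy_crc_errors.  All other configurations report rx_fcs_errors | 
 |  |  * straight from the hardware stats block. | 
 |  |  */ | 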
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9152 | static unsigned long calc_crc_errors(struct tg3 *tp) | 
 | 9153 | { | 
 | 9154 | 	struct tg3_hw_stats *hw_stats = tp->hw_stats; | 
 | 9155 |  | 
 | 9156 | 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 
 | 9157 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 9158 | 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9159 | 		u32 val; | 
 | 9160 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9161 | 		spin_lock_bh(&tp->lock); | 
| Michael Chan | 569a5df | 2007-02-13 12:18:15 -0800 | [diff] [blame] | 9162 | 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { | 
 | 9163 | 			tg3_writephy(tp, MII_TG3_TEST1, | 
 | 9164 | 				     val | MII_TG3_TEST1_CRC_EN); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9165 | 			tg3_readphy(tp, 0x14, &val); | 
 | 9166 | 		} else | 
 | 9167 | 			val = 0; | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9168 | 		spin_unlock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9169 |  | 
 | 9170 | 		tp->phy_crc_errors += val; | 
 | 9171 |  | 
 | 9172 | 		return tp->phy_crc_errors; | 
 | 9173 | 	} | 
 | 9174 |  | 
 | 9175 | 	return get_stat64(&hw_stats->rx_fcs_errors); | 
 | 9176 | } | 
 | 9177 |  | 
 | 9178 | #define ESTAT_ADD(member) \ | 
 | 9179 | 	estats->member =	old_estats->member + \ | 
| Stefan Buehler | 816f8b8 | 2008-08-15 14:10:54 -0700 | [diff] [blame] | 9180 | 				get_estat64(&hw_stats->member) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9181 |  | 
 | 9182 | static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) | 
 | 9183 | { | 
 | 9184 | 	struct tg3_ethtool_stats *estats = &tp->estats; | 
 | 9185 | 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev; | 
 | 9186 | 	struct tg3_hw_stats *hw_stats = tp->hw_stats; | 
 | 9187 |  | 
 | 9188 | 	if (!hw_stats) | 
 | 9189 | 		return old_estats; | 
 | 9190 |  | 
 | 9191 | 	ESTAT_ADD(rx_octets); | 
 | 9192 | 	ESTAT_ADD(rx_fragments); | 
 | 9193 | 	ESTAT_ADD(rx_ucast_packets); | 
 | 9194 | 	ESTAT_ADD(rx_mcast_packets); | 
 | 9195 | 	ESTAT_ADD(rx_bcast_packets); | 
 | 9196 | 	ESTAT_ADD(rx_fcs_errors); | 
 | 9197 | 	ESTAT_ADD(rx_align_errors); | 
 | 9198 | 	ESTAT_ADD(rx_xon_pause_rcvd); | 
 | 9199 | 	ESTAT_ADD(rx_xoff_pause_rcvd); | 
 | 9200 | 	ESTAT_ADD(rx_mac_ctrl_rcvd); | 
 | 9201 | 	ESTAT_ADD(rx_xoff_entered); | 
 | 9202 | 	ESTAT_ADD(rx_frame_too_long_errors); | 
 | 9203 | 	ESTAT_ADD(rx_jabbers); | 
 | 9204 | 	ESTAT_ADD(rx_undersize_packets); | 
 | 9205 | 	ESTAT_ADD(rx_in_length_errors); | 
 | 9206 | 	ESTAT_ADD(rx_out_length_errors); | 
 | 9207 | 	ESTAT_ADD(rx_64_or_less_octet_packets); | 
 | 9208 | 	ESTAT_ADD(rx_65_to_127_octet_packets); | 
 | 9209 | 	ESTAT_ADD(rx_128_to_255_octet_packets); | 
 | 9210 | 	ESTAT_ADD(rx_256_to_511_octet_packets); | 
 | 9211 | 	ESTAT_ADD(rx_512_to_1023_octet_packets); | 
 | 9212 | 	ESTAT_ADD(rx_1024_to_1522_octet_packets); | 
 | 9213 | 	ESTAT_ADD(rx_1523_to_2047_octet_packets); | 
 | 9214 | 	ESTAT_ADD(rx_2048_to_4095_octet_packets); | 
 | 9215 | 	ESTAT_ADD(rx_4096_to_8191_octet_packets); | 
 | 9216 | 	ESTAT_ADD(rx_8192_to_9022_octet_packets); | 
 | 9217 |  | 
 | 9218 | 	ESTAT_ADD(tx_octets); | 
 | 9219 | 	ESTAT_ADD(tx_collisions); | 
 | 9220 | 	ESTAT_ADD(tx_xon_sent); | 
 | 9221 | 	ESTAT_ADD(tx_xoff_sent); | 
 | 9222 | 	ESTAT_ADD(tx_flow_control); | 
 | 9223 | 	ESTAT_ADD(tx_mac_errors); | 
 | 9224 | 	ESTAT_ADD(tx_single_collisions); | 
 | 9225 | 	ESTAT_ADD(tx_mult_collisions); | 
 | 9226 | 	ESTAT_ADD(tx_deferred); | 
 | 9227 | 	ESTAT_ADD(tx_excessive_collisions); | 
 | 9228 | 	ESTAT_ADD(tx_late_collisions); | 
 | 9229 | 	ESTAT_ADD(tx_collide_2times); | 
 | 9230 | 	ESTAT_ADD(tx_collide_3times); | 
 | 9231 | 	ESTAT_ADD(tx_collide_4times); | 
 | 9232 | 	ESTAT_ADD(tx_collide_5times); | 
 | 9233 | 	ESTAT_ADD(tx_collide_6times); | 
 | 9234 | 	ESTAT_ADD(tx_collide_7times); | 
 | 9235 | 	ESTAT_ADD(tx_collide_8times); | 
 | 9236 | 	ESTAT_ADD(tx_collide_9times); | 
 | 9237 | 	ESTAT_ADD(tx_collide_10times); | 
 | 9238 | 	ESTAT_ADD(tx_collide_11times); | 
 | 9239 | 	ESTAT_ADD(tx_collide_12times); | 
 | 9240 | 	ESTAT_ADD(tx_collide_13times); | 
 | 9241 | 	ESTAT_ADD(tx_collide_14times); | 
 | 9242 | 	ESTAT_ADD(tx_collide_15times); | 
 | 9243 | 	ESTAT_ADD(tx_ucast_packets); | 
 | 9244 | 	ESTAT_ADD(tx_mcast_packets); | 
 | 9245 | 	ESTAT_ADD(tx_bcast_packets); | 
 | 9246 | 	ESTAT_ADD(tx_carrier_sense_errors); | 
 | 9247 | 	ESTAT_ADD(tx_discards); | 
 | 9248 | 	ESTAT_ADD(tx_errors); | 
 | 9249 |  | 
 | 9250 | 	ESTAT_ADD(dma_writeq_full); | 
 | 9251 | 	ESTAT_ADD(dma_write_prioq_full); | 
 | 9252 | 	ESTAT_ADD(rxbds_empty); | 
 | 9253 | 	ESTAT_ADD(rx_discards); | 
 | 9254 | 	ESTAT_ADD(rx_errors); | 
 | 9255 | 	ESTAT_ADD(rx_threshold_hit); | 
 | 9256 |  | 
 | 9257 | 	ESTAT_ADD(dma_readq_full); | 
 | 9258 | 	ESTAT_ADD(dma_read_prioq_full); | 
 | 9259 | 	ESTAT_ADD(tx_comp_queue_full); | 
 | 9260 |  | 
 | 9261 | 	ESTAT_ADD(ring_set_send_prod_index); | 
 | 9262 | 	ESTAT_ADD(ring_status_update); | 
 | 9263 | 	ESTAT_ADD(nic_irqs); | 
 | 9264 | 	ESTAT_ADD(nic_avoided_irqs); | 
 | 9265 | 	ESTAT_ADD(nic_tx_threshold_hit); | 
 | 9266 |  | 
 | 9267 | 	return estats; | 
 | 9268 | } | 
 | 9269 |  | 
 | 9270 | static struct net_device_stats *tg3_get_stats(struct net_device *dev) | 
 | 9271 | { | 
 | 9272 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9273 | 	struct net_device_stats *stats = &tp->net_stats; | 
 | 9274 | 	struct net_device_stats *old_stats = &tp->net_stats_prev; | 
 | 9275 | 	struct tg3_hw_stats *hw_stats = tp->hw_stats; | 
 | 9276 |  | 
 | 9277 | 	if (!hw_stats) | 
 | 9278 | 		return old_stats; | 
 | 9279 |  | 
 | 9280 | 	stats->rx_packets = old_stats->rx_packets + | 
 | 9281 | 		get_stat64(&hw_stats->rx_ucast_packets) + | 
 | 9282 | 		get_stat64(&hw_stats->rx_mcast_packets) + | 
 | 9283 | 		get_stat64(&hw_stats->rx_bcast_packets); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9284 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9285 | 	stats->tx_packets = old_stats->tx_packets + | 
 | 9286 | 		get_stat64(&hw_stats->tx_ucast_packets) + | 
 | 9287 | 		get_stat64(&hw_stats->tx_mcast_packets) + | 
 | 9288 | 		get_stat64(&hw_stats->tx_bcast_packets); | 
 | 9289 |  | 
 | 9290 | 	stats->rx_bytes = old_stats->rx_bytes + | 
 | 9291 | 		get_stat64(&hw_stats->rx_octets); | 
 | 9292 | 	stats->tx_bytes = old_stats->tx_bytes + | 
 | 9293 | 		get_stat64(&hw_stats->tx_octets); | 
 | 9294 |  | 
 | 9295 | 	stats->rx_errors = old_stats->rx_errors + | 
| John W. Linville | 4f63b87 | 2005-09-12 14:43:18 -0700 | [diff] [blame] | 9296 | 		get_stat64(&hw_stats->rx_errors); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9297 | 	stats->tx_errors = old_stats->tx_errors + | 
 | 9298 | 		get_stat64(&hw_stats->tx_errors) + | 
 | 9299 | 		get_stat64(&hw_stats->tx_mac_errors) + | 
 | 9300 | 		get_stat64(&hw_stats->tx_carrier_sense_errors) + | 
 | 9301 | 		get_stat64(&hw_stats->tx_discards); | 
 | 9302 |  | 
 | 9303 | 	stats->multicast = old_stats->multicast + | 
 | 9304 | 		get_stat64(&hw_stats->rx_mcast_packets); | 
 | 9305 | 	stats->collisions = old_stats->collisions + | 
 | 9306 | 		get_stat64(&hw_stats->tx_collisions); | 
 | 9307 |  | 
 | 9308 | 	stats->rx_length_errors = old_stats->rx_length_errors + | 
 | 9309 | 		get_stat64(&hw_stats->rx_frame_too_long_errors) + | 
 | 9310 | 		get_stat64(&hw_stats->rx_undersize_packets); | 
 | 9311 |  | 
 | 9312 | 	stats->rx_over_errors = old_stats->rx_over_errors + | 
 | 9313 | 		get_stat64(&hw_stats->rxbds_empty); | 
 | 9314 | 	stats->rx_frame_errors = old_stats->rx_frame_errors + | 
 | 9315 | 		get_stat64(&hw_stats->rx_align_errors); | 
 | 9316 | 	stats->tx_aborted_errors = old_stats->tx_aborted_errors + | 
 | 9317 | 		get_stat64(&hw_stats->tx_discards); | 
 | 9318 | 	stats->tx_carrier_errors = old_stats->tx_carrier_errors + | 
 | 9319 | 		get_stat64(&hw_stats->tx_carrier_sense_errors); | 
 | 9320 |  | 
 | 9321 | 	stats->rx_crc_errors = old_stats->rx_crc_errors + | 
 | 9322 | 		calc_crc_errors(tp); | 
 | 9323 |  | 
| John W. Linville | 4f63b87 | 2005-09-12 14:43:18 -0700 | [diff] [blame] | 9324 | 	stats->rx_missed_errors = old_stats->rx_missed_errors + | 
 | 9325 | 		get_stat64(&hw_stats->rx_discards); | 
 | 9326 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9327 | 	return stats; | 
 | 9328 | } | 
 | 9329 |  | 
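 |  | /* | 
 |  |  * Bit-wise CRC-32 over 'len' bytes using the reflected Ethernet | 
 |  |  * polynomial 0xedb88320.  Used below to hash multicast addresses into | 
 |  |  * the 128-bit MAC hash filter. | 
 |  |  */ | 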
 | 9330 | static inline u32 calc_crc(unsigned char *buf, int len) | 
 | 9331 | { | 
 | 9332 | 	u32 reg; | 
 | 9333 | 	u32 tmp; | 
 | 9334 | 	int j, k; | 
 | 9335 |  | 
 | 9336 | 	reg = 0xffffffff; | 
 | 9337 |  | 
 | 9338 | 	for (j = 0; j < len; j++) { | 
 | 9339 | 		reg ^= buf[j]; | 
 | 9340 |  | 
 | 9341 | 		for (k = 0; k < 8; k++) { | 
 | 9342 | 			tmp = reg & 0x01; | 
 | 9343 |  | 
 | 9344 | 			reg >>= 1; | 
 | 9345 |  | 
 | 9346 | 			if (tmp) { | 
 | 9347 | 				reg ^= 0xedb88320; | 
 | 9348 | 			} | 
 | 9349 | 		} | 
 | 9350 | 	} | 
 | 9351 |  | 
 | 9352 | 	return ~reg; | 
 | 9353 | } | 
 | 9354 |  | 
 | 9355 | static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) | 
 | 9356 | { | 
 | 9357 | 	/* accept or reject all multicast frames */ | 
 | 9358 | 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); | 
 | 9359 | 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); | 
 | 9360 | 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); | 
 | 9361 | 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); | 
 | 9362 | } | 
 | 9363 |  | 
 | 9364 | static void __tg3_set_rx_mode(struct net_device *dev) | 
 | 9365 | { | 
 | 9366 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9367 | 	u32 rx_mode; | 
 | 9368 |  | 
 | 9369 | 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | | 
 | 9370 | 				  RX_MODE_KEEP_VLAN_TAG); | 
 | 9371 |  | 
 | 9372 | 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG | 
 | 9373 | 	 * flag clear. | 
 | 9374 | 	 */ | 
 | 9375 | #if TG3_VLAN_TAG_USED | 
 | 9376 | 	if (!tp->vlgrp && | 
 | 9377 | 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 
 | 9378 | 		rx_mode |= RX_MODE_KEEP_VLAN_TAG; | 
 | 9379 | #else | 
 | 9380 | 	/* By definition, VLAN is always disabled in this | 
 | 9381 | 	 * case. | 
 | 9382 | 	 */ | 
 | 9383 | 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 
 | 9384 | 		rx_mode |= RX_MODE_KEEP_VLAN_TAG; | 
 | 9385 | #endif | 
 | 9386 |  | 
 | 9387 | 	if (dev->flags & IFF_PROMISC) { | 
 | 9388 | 		/* Promiscuous mode. */ | 
 | 9389 | 		rx_mode |= RX_MODE_PROMISC; | 
 | 9390 | 	} else if (dev->flags & IFF_ALLMULTI) { | 
 | 9391 | 		/* Accept all multicast. */ | 
 | 9392 | 		tg3_set_multi(tp, 1); | 
 | 9393 | 	} else if (dev->mc_count < 1) { | 
 | 9394 | 		/* Reject all multicast. */ | 
 | 9395 | 		tg3_set_multi(tp, 0); | 
 | 9396 | 	} else { | 
 | 9397 | 		/* Accept one or more multicast(s). */ | 
 | 9398 | 		struct dev_mc_list *mclist; | 
 | 9399 | 		unsigned int i; | 
 | 9400 | 		u32 mc_filter[4] = { 0, }; | 
 | 9401 | 		u32 regidx; | 
 | 9402 | 		u32 bit; | 
 | 9403 | 		u32 crc; | 
 | 9404 |  | 
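 |  | 		/* Hash each address with the Ethernet CRC-32; the low 7 bits | 
 |  | 		 * of the inverted CRC index one of the 128 filter bits: bits | 
 |  | 		 * 6:5 select one of the four MAC_HASH_REG_* registers and | 
 |  | 		 * bits 4:0 select the bit within that register. | 
 |  | 		 */ | 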
 | 9405 | 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | 
 | 9406 | 		     i++, mclist = mclist->next) { | 
 | 9407 |  | 
 | 9408 | 			crc = calc_crc(mclist->dmi_addr, ETH_ALEN); | 
 | 9409 | 			bit = ~crc & 0x7f; | 
 | 9410 | 			regidx = (bit & 0x60) >> 5; | 
 | 9411 | 			bit &= 0x1f; | 
 | 9412 | 			mc_filter[regidx] |= (1 << bit); | 
 | 9413 | 		} | 
 | 9414 |  | 
 | 9415 | 		tw32(MAC_HASH_REG_0, mc_filter[0]); | 
 | 9416 | 		tw32(MAC_HASH_REG_1, mc_filter[1]); | 
 | 9417 | 		tw32(MAC_HASH_REG_2, mc_filter[2]); | 
 | 9418 | 		tw32(MAC_HASH_REG_3, mc_filter[3]); | 
 | 9419 | 	} | 
 | 9420 |  | 
 | 9421 | 	if (rx_mode != tp->rx_mode) { | 
 | 9422 | 		tp->rx_mode = rx_mode; | 
 | 9423 | 		tw32_f(MAC_RX_MODE, rx_mode); | 
 | 9424 | 		udelay(10); | 
 | 9425 | 	} | 
 | 9426 | } | 
 | 9427 |  | 
 | 9428 | static void tg3_set_rx_mode(struct net_device *dev) | 
 | 9429 | { | 
 | 9430 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9431 |  | 
| Michael Chan | e75f7c9 | 2006-03-20 21:33:26 -0800 | [diff] [blame] | 9432 | 	if (!netif_running(dev)) | 
 | 9433 | 		return; | 
 | 9434 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9435 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9436 | 	__tg3_set_rx_mode(dev); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9437 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9438 | } | 
 | 9439 |  | 
 | 9440 | #define TG3_REGDUMP_LEN		(32 * 1024) | 
 | 9441 |  | 
 | 9442 | static int tg3_get_regs_len(struct net_device *dev) | 
 | 9443 | { | 
 | 9444 | 	return TG3_REGDUMP_LEN; | 
 | 9445 | } | 
 | 9446 |  | 
 | 9447 | static void tg3_get_regs(struct net_device *dev, | 
 | 9448 | 		struct ethtool_regs *regs, void *_p) | 
 | 9449 | { | 
 | 9450 | 	u32 *p = _p; | 
 | 9451 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9452 | 	u8 *orig_p = _p; | 
 | 9453 | 	int i; | 
 | 9454 |  | 
 | 9455 | 	regs->version = 0; | 
 | 9456 |  | 
 | 9457 | 	memset(p, 0, TG3_REGDUMP_LEN); | 
 | 9458 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 9459 | 	if (tp->link_config.phy_is_low_power) | 
 | 9460 | 		return; | 
 | 9461 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9462 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9463 |  | 
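 |  | /* | 
 |  |  * Each register block is copied into the dump buffer at its own | 
 |  |  * hardware offset (orig_p + base), so the 32 KB register dump mirrors | 
 |  |  * the chip's register layout; regions that are skipped stay zero from | 
 |  |  * the memset() above. | 
 |  |  */ | 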
 | 9464 | #define __GET_REG32(reg)	(*(p)++ = tr32(reg)) | 
 | 9465 | #define GET_REG32_LOOP(base,len)		\ | 
 | 9466 | do {	p = (u32 *)(orig_p + (base));		\ | 
 | 9467 | 	for (i = 0; i < len; i += 4)		\ | 
 | 9468 | 		__GET_REG32((base) + i);	\ | 
 | 9469 | } while (0) | 
 | 9470 | #define GET_REG32_1(reg)			\ | 
 | 9471 | do {	p = (u32 *)(orig_p + (reg));		\ | 
 | 9472 | 	__GET_REG32((reg));			\ | 
 | 9473 | } while (0) | 
 | 9474 |  | 
 | 9475 | 	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0); | 
 | 9476 | 	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200); | 
 | 9477 | 	GET_REG32_LOOP(MAC_MODE, 0x4f0); | 
 | 9478 | 	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0); | 
 | 9479 | 	GET_REG32_1(SNDDATAC_MODE); | 
 | 9480 | 	GET_REG32_LOOP(SNDBDS_MODE, 0x80); | 
 | 9481 | 	GET_REG32_LOOP(SNDBDI_MODE, 0x48); | 
 | 9482 | 	GET_REG32_1(SNDBDC_MODE); | 
 | 9483 | 	GET_REG32_LOOP(RCVLPC_MODE, 0x20); | 
 | 9484 | 	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c); | 
 | 9485 | 	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c); | 
 | 9486 | 	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c); | 
 | 9487 | 	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44); | 
 | 9488 | 	GET_REG32_1(RCVDCC_MODE); | 
 | 9489 | 	GET_REG32_LOOP(RCVBDI_MODE, 0x20); | 
 | 9490 | 	GET_REG32_LOOP(RCVCC_MODE, 0x14); | 
 | 9491 | 	GET_REG32_LOOP(RCVLSC_MODE, 0x08); | 
 | 9492 | 	GET_REG32_1(MBFREE_MODE); | 
 | 9493 | 	GET_REG32_LOOP(HOSTCC_MODE, 0x100); | 
 | 9494 | 	GET_REG32_LOOP(MEMARB_MODE, 0x10); | 
 | 9495 | 	GET_REG32_LOOP(BUFMGR_MODE, 0x58); | 
 | 9496 | 	GET_REG32_LOOP(RDMAC_MODE, 0x08); | 
 | 9497 | 	GET_REG32_LOOP(WDMAC_MODE, 0x08); | 
| Chris Elmquist | 091465d | 2005-12-20 13:25:19 -0800 | [diff] [blame] | 9498 | 	GET_REG32_1(RX_CPU_MODE); | 
 | 9499 | 	GET_REG32_1(RX_CPU_STATE); | 
 | 9500 | 	GET_REG32_1(RX_CPU_PGMCTR); | 
 | 9501 | 	GET_REG32_1(RX_CPU_HWBKPT); | 
 | 9502 | 	GET_REG32_1(TX_CPU_MODE); | 
 | 9503 | 	GET_REG32_1(TX_CPU_STATE); | 
 | 9504 | 	GET_REG32_1(TX_CPU_PGMCTR); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9505 | 	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110); | 
 | 9506 | 	GET_REG32_LOOP(FTQ_RESET, 0x120); | 
 | 9507 | 	GET_REG32_LOOP(MSGINT_MODE, 0x0c); | 
 | 9508 | 	GET_REG32_1(DMAC_MODE); | 
 | 9509 | 	GET_REG32_LOOP(GRC_MODE, 0x4c); | 
 | 9510 | 	if (tp->tg3_flags & TG3_FLAG_NVRAM) | 
 | 9511 | 		GET_REG32_LOOP(NVRAM_CMD, 0x24); | 
 | 9512 |  | 
 | 9513 | #undef __GET_REG32 | 
 | 9514 | #undef GET_REG32_LOOP | 
 | 9515 | #undef GET_REG32_1 | 
 | 9516 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9517 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9518 | } | 
 | 9519 |  | 
 | 9520 | static int tg3_get_eeprom_len(struct net_device *dev) | 
 | 9521 | { | 
 | 9522 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9523 |  | 
 | 9524 | 	return tp->nvram_size; | 
 | 9525 | } | 
 | 9526 |  | 
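 |  | /* | 
 |  |  * NVRAM is read 32 bits at a time on 4-byte boundaries, so an arbitrary | 
 |  |  * ethtool EEPROM request is split into an unaligned head, a run of | 
 |  |  * aligned words and an unaligned tail; the bordering words are read | 
 |  |  * whole and only the requested bytes are copied out. | 
 |  |  */ | 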
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9527 | static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) | 
 | 9528 | { | 
 | 9529 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9530 | 	int ret; | 
 | 9531 | 	u8  *pd; | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 9532 | 	u32 i, offset, len, b_offset, b_count; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9533 | 	__be32 val; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9534 |  | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 9535 | 	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) | 
 | 9536 | 		return -EINVAL; | 
 | 9537 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 9538 | 	if (tp->link_config.phy_is_low_power) | 
 | 9539 | 		return -EAGAIN; | 
 | 9540 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9541 | 	offset = eeprom->offset; | 
 | 9542 | 	len = eeprom->len; | 
 | 9543 | 	eeprom->len = 0; | 
 | 9544 |  | 
 | 9545 | 	eeprom->magic = TG3_EEPROM_MAGIC; | 
 | 9546 |  | 
 | 9547 | 	if (offset & 3) { | 
 | 9548 | 		/* adjust to start on the required 4-byte boundary */ | 
 | 9549 | 		b_offset = offset & 3; | 
 | 9550 | 		b_count = 4 - b_offset; | 
 | 9551 | 		if (b_count > len) { | 
 | 9552 | 			/* i.e. offset=1 len=2 */ | 
 | 9553 | 			b_count = len; | 
 | 9554 | 		} | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9555 | 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9556 | 		if (ret) | 
 | 9557 | 			return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9558 | 		memcpy(data, ((char *)&val) + b_offset, b_count); | 
 | 9559 | 		len -= b_count; | 
 | 9560 | 		offset += b_count; | 
 | 9561 | 		eeprom->len += b_count; | 
 | 9562 | 	} | 
 | 9563 |  | 
 | 9564 | 	/* read bytes up to the last 4-byte boundary */ | 
 | 9565 | 	pd = &data[eeprom->len]; | 
 | 9566 | 	for (i = 0; i < (len - (len & 3)); i += 4) { | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9567 | 		ret = tg3_nvram_read_be32(tp, offset + i, &val); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9568 | 		if (ret) { | 
 | 9569 | 			eeprom->len += i; | 
 | 9570 | 			return ret; | 
 | 9571 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9572 | 		memcpy(pd + i, &val, 4); | 
 | 9573 | 	} | 
 | 9574 | 	eeprom->len += i; | 
 | 9575 |  | 
 | 9576 | 	if (len & 3) { | 
 | 9577 | 		/* read last bytes not ending on a 4-byte boundary */ | 
 | 9578 | 		pd = &data[eeprom->len]; | 
 | 9579 | 		b_count = len & 3; | 
 | 9580 | 		b_offset = offset + len - b_count; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9581 | 		ret = tg3_nvram_read_be32(tp, b_offset, &val); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9582 | 		if (ret) | 
 | 9583 | 			return ret; | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 9584 | 		memcpy(pd, &val, b_count); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9585 | 		eeprom->len += b_count; | 
 | 9586 | 	} | 
 | 9587 | 	return 0; | 
 | 9588 | } | 
 | 9589 |  | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9590 | static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9591 |  | 
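 |  | /* | 
 |  |  * NVRAM writes also operate on aligned 32-bit words, so an unaligned or | 
 |  |  * odd-length request is handled read-modify-write: the bordering words | 
 |  |  * are read first, the caller's data is merged into a temporary buffer, | 
 |  |  * and the padded range is written back as one block. | 
 |  |  */ | 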
 | 9592 | static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) | 
 | 9593 | { | 
 | 9594 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9595 | 	int ret; | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 9596 | 	u32 offset, len, b_offset, odd_len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9597 | 	u8 *buf; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9598 | 	__be32 start, end; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9599 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 9600 | 	if (tp->link_config.phy_is_low_power) | 
 | 9601 | 		return -EAGAIN; | 
 | 9602 |  | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 9603 | 	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | 
 | 9604 | 	    eeprom->magic != TG3_EEPROM_MAGIC) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9605 | 		return -EINVAL; | 
 | 9606 |  | 
 | 9607 | 	offset = eeprom->offset; | 
 | 9608 | 	len = eeprom->len; | 
 | 9609 |  | 
 | 9610 | 	if ((b_offset = (offset & 3))) { | 
 | 9611 | 		/* adjust to start on the required 4-byte boundary */ | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9612 | 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9613 | 		if (ret) | 
 | 9614 | 			return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9615 | 		len += b_offset; | 
 | 9616 | 		offset &= ~3; | 
| Michael Chan | 1c8594b | 2005-04-21 17:12:46 -0700 | [diff] [blame] | 9617 | 		if (len < 4) | 
 | 9618 | 			len = 4; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9619 | 	} | 
 | 9620 |  | 
 | 9621 | 	odd_len = 0; | 
| Michael Chan | 1c8594b | 2005-04-21 17:12:46 -0700 | [diff] [blame] | 9622 | 	if (len & 3) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9623 | 		/* adjust to end on the required 4-byte boundary */ | 
 | 9624 | 		odd_len = 1; | 
 | 9625 | 		len = (len + 3) & ~3; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 9626 | 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9627 | 		if (ret) | 
 | 9628 | 			return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9629 | 	} | 
 | 9630 |  | 
 | 9631 | 	buf = data; | 
 | 9632 | 	if (b_offset || odd_len) { | 
 | 9633 | 		buf = kmalloc(len, GFP_KERNEL); | 
| Andy Gospodarek | ab0049b | 2007-09-06 20:42:14 +0100 | [diff] [blame] | 9634 | 		if (!buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9635 | 			return -ENOMEM; | 
 | 9636 | 		if (b_offset) | 
 | 9637 | 			memcpy(buf, &start, 4); | 
 | 9638 | 		if (odd_len) | 
 | 9639 | 			memcpy(buf+len-4, &end, 4); | 
 | 9640 | 		memcpy(buf + b_offset, data, eeprom->len); | 
 | 9641 | 	} | 
 | 9642 |  | 
 | 9643 | 	ret = tg3_nvram_write_block(tp, offset, len, buf); | 
 | 9644 |  | 
 | 9645 | 	if (buf != data) | 
 | 9646 | 		kfree(buf); | 
 | 9647 |  | 
 | 9648 | 	return ret; | 
 | 9649 | } | 
 | 9650 |  | 
 | 9651 | static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
 | 9652 | { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9653 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9654 |  | 
 | 9655 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 9656 | 		struct phy_device *phydev; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9657 | 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 9658 | 			return -EAGAIN; | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 9659 | 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
 | 9660 | 		return phy_ethtool_gset(phydev, cmd); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9661 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9662 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9663 | 	cmd->supported = (SUPPORTED_Autoneg); | 
 | 9664 |  | 
 | 9665 | 	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 
 | 9666 | 		cmd->supported |= (SUPPORTED_1000baseT_Half | | 
 | 9667 | 				   SUPPORTED_1000baseT_Full); | 
 | 9668 |  | 
| Karsten Keil | ef34814 | 2006-05-12 12:49:08 -0700 | [diff] [blame] | 9669 | 	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9670 | 		cmd->supported |= (SUPPORTED_100baseT_Half | | 
 | 9671 | 				  SUPPORTED_100baseT_Full | | 
 | 9672 | 				  SUPPORTED_10baseT_Half | | 
 | 9673 | 				  SUPPORTED_10baseT_Full | | 
| Matt Carlson | 3bebab5 | 2007-11-12 21:22:40 -0800 | [diff] [blame] | 9674 | 				  SUPPORTED_TP); | 
| Karsten Keil | ef34814 | 2006-05-12 12:49:08 -0700 | [diff] [blame] | 9675 | 		cmd->port = PORT_TP; | 
 | 9676 | 	} else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9677 | 		cmd->supported |= SUPPORTED_FIBRE; | 
| Karsten Keil | ef34814 | 2006-05-12 12:49:08 -0700 | [diff] [blame] | 9678 | 		cmd->port = PORT_FIBRE; | 
 | 9679 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9680 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9681 | 	cmd->advertising = tp->link_config.advertising; | 
 | 9682 | 	if (netif_running(dev)) { | 
 | 9683 | 		cmd->speed = tp->link_config.active_speed; | 
 | 9684 | 		cmd->duplex = tp->link_config.active_duplex; | 
 | 9685 | 	} | 
| Matt Carlson | 882e979 | 2009-09-01 13:21:36 +0000 | [diff] [blame] | 9686 | 	cmd->phy_address = tp->phy_addr; | 
| Matt Carlson | 7e5856b | 2009-02-25 14:23:01 +0000 | [diff] [blame] | 9687 | 	cmd->transceiver = XCVR_INTERNAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9688 | 	cmd->autoneg = tp->link_config.autoneg; | 
 | 9689 | 	cmd->maxtxpkt = 0; | 
 | 9690 | 	cmd->maxrxpkt = 0; | 
 | 9691 | 	return 0; | 
 | 9692 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9693 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9694 | static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
 | 9695 | { | 
 | 9696 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9697 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9698 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 9699 | 		struct phy_device *phydev; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9700 | 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 9701 | 			return -EAGAIN; | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 9702 | 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
 | 9703 | 		return phy_ethtool_sset(phydev, cmd); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9704 | 	} | 
 | 9705 |  | 
| Matt Carlson | 7e5856b | 2009-02-25 14:23:01 +0000 | [diff] [blame] | 9706 | 	if (cmd->autoneg != AUTONEG_ENABLE && | 
 | 9707 | 	    cmd->autoneg != AUTONEG_DISABLE) | 
| Michael Chan | 37ff238 | 2005-10-26 15:49:51 -0700 | [diff] [blame] | 9708 | 		return -EINVAL; | 
| Matt Carlson | 7e5856b | 2009-02-25 14:23:01 +0000 | [diff] [blame] | 9709 |  | 
 | 9710 | 	if (cmd->autoneg == AUTONEG_DISABLE && | 
 | 9711 | 	    cmd->duplex != DUPLEX_FULL && | 
 | 9712 | 	    cmd->duplex != DUPLEX_HALF) | 
| Michael Chan | 37ff238 | 2005-10-26 15:49:51 -0700 | [diff] [blame] | 9713 | 		return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9714 |  | 
| Matt Carlson | 7e5856b | 2009-02-25 14:23:01 +0000 | [diff] [blame] | 9715 | 	if (cmd->autoneg == AUTONEG_ENABLE) { | 
 | 9716 | 		u32 mask = ADVERTISED_Autoneg | | 
 | 9717 | 			   ADVERTISED_Pause | | 
 | 9718 | 			   ADVERTISED_Asym_Pause; | 
 | 9719 |  | 
 | 9720 | 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 
 | 9721 | 			mask |= ADVERTISED_1000baseT_Half | | 
 | 9722 | 				ADVERTISED_1000baseT_Full; | 
 | 9723 |  | 
 | 9724 | 		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 
 | 9725 | 			mask |= ADVERTISED_100baseT_Half | | 
 | 9726 | 				ADVERTISED_100baseT_Full | | 
 | 9727 | 				ADVERTISED_10baseT_Half | | 
 | 9728 | 				ADVERTISED_10baseT_Full | | 
 | 9729 | 				ADVERTISED_TP; | 
 | 9730 | 		else | 
 | 9731 | 			mask |= ADVERTISED_FIBRE; | 
 | 9732 |  | 
 | 9733 | 		if (cmd->advertising & ~mask) | 
 | 9734 | 			return -EINVAL; | 
 | 9735 |  | 
 | 9736 | 		mask &= (ADVERTISED_1000baseT_Half | | 
 | 9737 | 			 ADVERTISED_1000baseT_Full | | 
 | 9738 | 			 ADVERTISED_100baseT_Half | | 
 | 9739 | 			 ADVERTISED_100baseT_Full | | 
 | 9740 | 			 ADVERTISED_10baseT_Half | | 
 | 9741 | 			 ADVERTISED_10baseT_Full); | 
 | 9742 |  | 
 | 9743 | 		cmd->advertising &= mask; | 
 | 9744 | 	} else { | 
 | 9745 | 		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { | 
 | 9746 | 			if (cmd->speed != SPEED_1000) | 
 | 9747 | 				return -EINVAL; | 
 | 9748 |  | 
 | 9749 | 			if (cmd->duplex != DUPLEX_FULL) | 
 | 9750 | 				return -EINVAL; | 
 | 9751 | 		} else { | 
 | 9752 | 			if (cmd->speed != SPEED_100 && | 
 | 9753 | 			    cmd->speed != SPEED_10) | 
 | 9754 | 				return -EINVAL; | 
 | 9755 | 		} | 
 | 9756 | 	} | 
 | 9757 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9758 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9759 |  | 
 | 9760 | 	tp->link_config.autoneg = cmd->autoneg; | 
 | 9761 | 	if (cmd->autoneg == AUTONEG_ENABLE) { | 
| Andy Gospodarek | 405d8e5 | 2007-10-08 01:08:47 -0700 | [diff] [blame] | 9762 | 		tp->link_config.advertising = (cmd->advertising | | 
 | 9763 | 					      ADVERTISED_Autoneg); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9764 | 		tp->link_config.speed = SPEED_INVALID; | 
 | 9765 | 		tp->link_config.duplex = DUPLEX_INVALID; | 
 | 9766 | 	} else { | 
 | 9767 | 		tp->link_config.advertising = 0; | 
 | 9768 | 		tp->link_config.speed = cmd->speed; | 
 | 9769 | 		tp->link_config.duplex = cmd->duplex; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9770 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9771 |  | 
| Michael Chan | 24fcad6 | 2006-12-17 17:06:46 -0800 | [diff] [blame] | 9772 | 	tp->link_config.orig_speed = tp->link_config.speed; | 
 | 9773 | 	tp->link_config.orig_duplex = tp->link_config.duplex; | 
 | 9774 | 	tp->link_config.orig_autoneg = tp->link_config.autoneg; | 
 | 9775 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9776 | 	if (netif_running(dev)) | 
 | 9777 | 		tg3_setup_phy(tp, 1); | 
 | 9778 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9779 | 	tg3_full_unlock(tp); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9780 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9781 | 	return 0; | 
 | 9782 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9783 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9784 | static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 
 | 9785 | { | 
 | 9786 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9787 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9788 | 	strcpy(info->driver, DRV_MODULE_NAME); | 
 | 9789 | 	strcpy(info->version, DRV_MODULE_VERSION); | 
| Michael Chan | c4e6575 | 2006-03-20 22:29:32 -0800 | [diff] [blame] | 9790 | 	strcpy(info->fw_version, tp->fw_ver); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9791 | 	strcpy(info->bus_info, pci_name(tp->pdev)); | 
 | 9792 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9793 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9794 | static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 
 | 9795 | { | 
 | 9796 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9797 |  | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 9798 | 	if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && | 
 | 9799 | 	    device_can_wakeup(&tp->pdev->dev)) | 
| Gary Zambrano | a85feb8 | 2007-05-05 11:52:19 -0700 | [diff] [blame] | 9800 | 		wol->supported = WAKE_MAGIC; | 
 | 9801 | 	else | 
 | 9802 | 		wol->supported = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9803 | 	wol->wolopts = 0; | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 9804 | 	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && | 
 | 9805 | 	    device_can_wakeup(&tp->pdev->dev)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9806 | 		wol->wolopts = WAKE_MAGIC; | 
 | 9807 | 	memset(&wol->sopass, 0, sizeof(wol->sopass)); | 
 | 9808 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9809 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9810 | static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 
 | 9811 | { | 
 | 9812 | 	struct tg3 *tp = netdev_priv(dev); | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 9813 | 	struct device *dp = &tp->pdev->dev; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9814 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9815 | 	if (wol->wolopts & ~WAKE_MAGIC) | 
 | 9816 | 		return -EINVAL; | 
 | 9817 | 	if ((wol->wolopts & WAKE_MAGIC) && | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 9818 | 	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9819 | 		return -EINVAL; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9820 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9821 | 	spin_lock_bh(&tp->lock); | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 9822 | 	if (wol->wolopts & WAKE_MAGIC) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9823 | 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 9824 | 		device_set_wakeup_enable(dp, true); | 
 | 9825 | 	} else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9826 | 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 9827 | 		device_set_wakeup_enable(dp, false); | 
 | 9828 | 	} | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9829 | 	spin_unlock_bh(&tp->lock); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9830 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9831 | 	return 0; | 
 | 9832 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9833 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9834 | static u32 tg3_get_msglevel(struct net_device *dev) | 
 | 9835 | { | 
 | 9836 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9837 | 	return tp->msg_enable; | 
 | 9838 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9839 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9840 | static void tg3_set_msglevel(struct net_device *dev, u32 value) | 
 | 9841 | { | 
 | 9842 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9843 | 	tp->msg_enable = value; | 
 | 9844 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9845 |  | 
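 |  | /* | 
 |  |  * Chips without TSO support reject any attempt to enable it.  On parts | 
 |  |  * with hardware TSO 2 / 3 and IPv6 checksum offload, enabling TSO also | 
 |  |  * turns on NETIF_F_TSO6, and HW TSO 3 devices plus the 5761, non-AX | 
 |  |  * 5784, 5785 and 57780 additionally gain NETIF_F_TSO_ECN. | 
 |  |  */ | 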
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9846 | static int tg3_set_tso(struct net_device *dev, u32 value) | 
 | 9847 | { | 
 | 9848 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 9849 |  | 
 | 9850 | 	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { | 
 | 9851 | 		if (value) | 
 | 9852 | 			return -EINVAL; | 
 | 9853 | 		return 0; | 
 | 9854 | 	} | 
| Matt Carlson | 027455a | 2008-12-21 20:19:30 -0800 | [diff] [blame] | 9855 | 	if ((dev->features & NETIF_F_IPV6_CSUM) && | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 9856 | 	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || | 
 | 9857 | 	     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 9858 | 		if (value) { | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 9859 | 			dev->features |= NETIF_F_TSO6; | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 9860 | 			if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || | 
 | 9861 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 9862 | 			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 
 | 9863 | 			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 9864 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 9865 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 9866 | 				dev->features |= NETIF_F_TSO_ECN; | 
 | 9867 | 		} else | 
 | 9868 | 			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 9869 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9870 | 	return ethtool_op_set_tso(dev, value); | 
 | 9871 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9872 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9873 | static int tg3_nway_reset(struct net_device *dev) | 
 | 9874 | { | 
 | 9875 | 	struct tg3 *tp = netdev_priv(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9876 | 	int r; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9877 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9878 | 	if (!netif_running(dev)) | 
 | 9879 | 		return -EAGAIN; | 
 | 9880 |  | 
| Michael Chan | c94e394 | 2005-09-27 12:12:42 -0700 | [diff] [blame] | 9881 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 
 | 9882 | 		return -EINVAL; | 
 | 9883 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9884 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
 | 9885 | 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 9886 | 			return -EAGAIN; | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 9887 | 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9888 | 	} else { | 
 | 9889 | 		u32 bmcr; | 
 | 9890 |  | 
 | 9891 | 		spin_lock_bh(&tp->lock); | 
 | 9892 | 		r = -EINVAL; | 
 | 9893 | 		tg3_readphy(tp, MII_BMCR, &bmcr); | 
 | 9894 | 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) && | 
 | 9895 | 		    ((bmcr & BMCR_ANENABLE) || | 
 | 9896 | 		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { | 
 | 9897 | 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | | 
 | 9898 | 						   BMCR_ANENABLE); | 
 | 9899 | 			r = 0; | 
 | 9900 | 		} | 
 | 9901 | 		spin_unlock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9902 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9903 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9904 | 	return r; | 
 | 9905 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9906 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9907 | static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 
 | 9908 | { | 
 | 9909 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9910 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9911 | 	ering->rx_max_pending = TG3_RX_RING_SIZE - 1; | 
 | 9912 | 	ering->rx_mini_max_pending = 0; | 
| Michael Chan | 4f81c32 | 2006-03-20 21:33:42 -0800 | [diff] [blame] | 9913 | 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) | 
 | 9914 | 		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; | 
 | 9915 | 	else | 
 | 9916 | 		ering->rx_jumbo_max_pending = 0; | 
 | 9917 |  | 
 | 9918 | 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9919 |  | 
 | 9920 | 	ering->rx_pending = tp->rx_pending; | 
 | 9921 | 	ering->rx_mini_pending = 0; | 
| Michael Chan | 4f81c32 | 2006-03-20 21:33:42 -0800 | [diff] [blame] | 9922 | 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) | 
 | 9923 | 		ering->rx_jumbo_pending = tp->rx_jumbo_pending; | 
 | 9924 | 	else | 
 | 9925 | 		ering->rx_jumbo_pending = 0; | 
 | 9926 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 9927 | 	ering->tx_pending = tp->napi[0].tx_pending; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9928 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9929 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9930 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 
 | 9931 | { | 
 | 9932 | 	struct tg3 *tp = netdev_priv(dev); | 
| Matt Carlson | 646c9ed | 2009-09-01 12:58:41 +0000 | [diff] [blame] | 9933 | 	int i, irq_sync = 0, err = 0; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9934 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9935 | 	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || | 
 | 9936 | 	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || | 
| Michael Chan | bc3a925 | 2006-10-18 20:55:18 -0700 | [diff] [blame] | 9937 | 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) || | 
 | 9938 | 	    (ering->tx_pending <= MAX_SKB_FRAGS) || | 
| Michael Chan | 7f62ad5 | 2007-02-20 23:25:40 -0800 | [diff] [blame] | 9939 | 	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && | 
| Michael Chan | bc3a925 | 2006-10-18 20:55:18 -0700 | [diff] [blame] | 9940 | 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9941 | 		return -EINVAL; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9942 |  | 
| Michael Chan | bbe832c | 2005-06-24 20:20:04 -0700 | [diff] [blame] | 9943 | 	if (netif_running(dev)) { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9944 | 		tg3_phy_stop(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9945 | 		tg3_netif_stop(tp); | 
| Michael Chan | bbe832c | 2005-06-24 20:20:04 -0700 | [diff] [blame] | 9946 | 		irq_sync = 1; | 
 | 9947 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9948 |  | 
| Michael Chan | bbe832c | 2005-06-24 20:20:04 -0700 | [diff] [blame] | 9949 | 	tg3_full_lock(tp, irq_sync); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9950 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9951 | 	tp->rx_pending = ering->rx_pending; | 
 | 9952 |  | 
 | 9953 | 	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) && | 
 | 9954 | 	    tp->rx_pending > 63) | 
 | 9955 | 		tp->rx_pending = 63; | 
 | 9956 | 	tp->rx_jumbo_pending = ering->rx_jumbo_pending; | 
| Matt Carlson | 646c9ed | 2009-09-01 12:58:41 +0000 | [diff] [blame] | 9957 |  | 
 | 9958 | 	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) | 
 | 9959 | 		tp->napi[i].tx_pending = ering->tx_pending; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9960 |  | 
 | 9961 | 	if (netif_running(dev)) { | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 9962 | 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 9963 | 		err = tg3_restart_hw(tp, 1); | 
 | 9964 | 		if (!err) | 
 | 9965 | 			tg3_netif_start(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9966 | 	} | 
 | 9967 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 9968 | 	tg3_full_unlock(tp); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9969 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9970 | 	if (irq_sync && !err) | 
 | 9971 | 		tg3_phy_start(tp); | 
 | 9972 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 9973 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9974 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9975 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9976 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 
 | 9977 | { | 
 | 9978 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9979 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9980 | 	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; | 
| Matt Carlson | 8d01862 | 2007-12-20 20:05:44 -0800 | [diff] [blame] | 9981 |  | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 9982 | 	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) | 
| Matt Carlson | 8d01862 | 2007-12-20 20:05:44 -0800 | [diff] [blame] | 9983 | 		epause->rx_pause = 1; | 
 | 9984 | 	else | 
 | 9985 | 		epause->rx_pause = 0; | 
 | 9986 |  | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 9987 | 	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) | 
| Matt Carlson | 8d01862 | 2007-12-20 20:05:44 -0800 | [diff] [blame] | 9988 | 		epause->tx_pause = 1; | 
 | 9989 | 	else | 
 | 9990 | 		epause->tx_pause = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9991 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9992 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9993 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 
 | 9994 | { | 
 | 9995 | 	struct tg3 *tp = netdev_priv(dev); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9996 | 	int err = 0; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 9997 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 9998 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
 | 9999 | 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 10000 | 			return -EAGAIN; | 
 | 10001 |  | 
 | 10002 | 		if (epause->autoneg) { | 
 | 10003 | 			u32 newadv; | 
 | 10004 | 			struct phy_device *phydev; | 
 | 10005 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 10006 | 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10007 |  | 
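 |  | 			/* Map the requested rx/tx pause settings onto the | 
 |  | 			 * 802.3 pause advertisement bits: symmetric pause | 
 |  | 			 * advertises Pause, rx-only advertises Pause plus | 
 |  | 			 * Asym_Pause, and tx-only advertises Asym_Pause alone. | 
 |  | 			 */ | 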
 | 10008 | 			if (epause->rx_pause) { | 
 | 10009 | 				if (epause->tx_pause) | 
 | 10010 | 					newadv = ADVERTISED_Pause; | 
 | 10011 | 				else | 
 | 10012 | 					newadv = ADVERTISED_Pause | | 
 | 10013 | 						 ADVERTISED_Asym_Pause; | 
 | 10014 | 			} else if (epause->tx_pause) { | 
 | 10015 | 				newadv = ADVERTISED_Asym_Pause; | 
 | 10016 | 			} else | 
 | 10017 | 				newadv = 0; | 
 | 10018 |  | 
 | 10019 | 			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | 
 | 10020 | 				u32 oldadv = phydev->advertising & | 
 | 10021 | 					     (ADVERTISED_Pause | | 
 | 10022 | 					      ADVERTISED_Asym_Pause); | 
 | 10023 | 				if (oldadv != newadv) { | 
 | 10024 | 					phydev->advertising &= | 
 | 10025 | 						~(ADVERTISED_Pause | | 
 | 10026 | 						  ADVERTISED_Asym_Pause); | 
 | 10027 | 					phydev->advertising |= newadv; | 
 | 10028 | 					err = phy_start_aneg(phydev); | 
 | 10029 | 				} | 
 | 10030 | 			} else { | 
 | 10031 | 				tp->link_config.advertising &= | 
 | 10032 | 						~(ADVERTISED_Pause | | 
 | 10033 | 						  ADVERTISED_Asym_Pause); | 
 | 10034 | 				tp->link_config.advertising |= newadv; | 
 | 10035 | 			} | 
 | 10036 | 		} else { | 
 | 10037 | 			if (epause->rx_pause) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10038 | 				tp->link_config.flowctrl |= FLOW_CTRL_RX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10039 | 			else | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10040 | 				tp->link_config.flowctrl &= ~FLOW_CTRL_RX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10041 |  | 
 | 10042 | 			if (epause->tx_pause) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10043 | 				tp->link_config.flowctrl |= FLOW_CTRL_TX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10044 | 			else | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10045 | 				tp->link_config.flowctrl &= ~FLOW_CTRL_TX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10046 |  | 
 | 10047 | 			if (netif_running(dev)) | 
 | 10048 | 				tg3_setup_flow_control(tp, 0, 0); | 
 | 10049 | 		} | 
 | 10050 | 	} else { | 
 | 10051 | 		int irq_sync = 0; | 
 | 10052 |  | 
 | 10053 | 		if (netif_running(dev)) { | 
 | 10054 | 			tg3_netif_stop(tp); | 
 | 10055 | 			irq_sync = 1; | 
 | 10056 | 		} | 
 | 10057 |  | 
 | 10058 | 		tg3_full_lock(tp, irq_sync); | 
 | 10059 |  | 
 | 10060 | 		if (epause->autoneg) | 
 | 10061 | 			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 
 | 10062 | 		else | 
 | 10063 | 			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; | 
 | 10064 | 		if (epause->rx_pause) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10065 | 			tp->link_config.flowctrl |= FLOW_CTRL_RX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10066 | 		else | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10067 | 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10068 | 		if (epause->tx_pause) | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10069 | 			tp->link_config.flowctrl |= FLOW_CTRL_TX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10070 | 		else | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 10071 | 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10072 |  | 
 | 10073 | 		if (netif_running(dev)) { | 
 | 10074 | 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
 | 10075 | 			err = tg3_restart_hw(tp, 1); | 
 | 10076 | 			if (!err) | 
 | 10077 | 				tg3_netif_start(tp); | 
 | 10078 | 		} | 
 | 10079 |  | 
 | 10080 | 		tg3_full_unlock(tp); | 
| Michael Chan | bbe832c | 2005-06-24 20:20:04 -0700 | [diff] [blame] | 10081 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10082 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 10083 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10084 | } | 
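The two pause handlers above are the driver end of ETHTOOL_GPAUSEPARAM/ETHTOOL_SPAUSEPARAM (what `ethtool -a` / `ethtool -A` issue). A minimal user-space sketch of that caller side follows; it is not part of the driver, the interface name "eth0" is an assumption, the set operation needs CAP_NET_ADMIN, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Roughly what "ethtool -A <ifname> autoneg on rx on tx on" does. */
static int set_pause(const char *ifname, int autoneg, int rx, int tx)
{
	struct ethtool_pauseparam pp = { .cmd = ETHTOOL_SPAUSEPARAM };
	struct ifreq ifr;
	int fd;

	pp.autoneg  = autoneg;
	pp.rx_pause = rx;
	pp.tx_pause = tx;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&pp;	/* kernel copies the struct from here */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {	/* reaches the set_pauseparam op */
		perror("ETHTOOL_SPAUSEPARAM");
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}

int main(void)
{
	return set_pause("eth0", 1, 1, 1) ? 1 : 0;
}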
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10085 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10086 | static u32 tg3_get_rx_csum(struct net_device *dev) | 
 | 10087 | { | 
 | 10088 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 10089 | 	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0; | 
 | 10090 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10091 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10092 | static int tg3_set_rx_csum(struct net_device *dev, u32 data) | 
 | 10093 | { | 
 | 10094 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10095 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10096 | 	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { | 
 | 10097 | 		if (data != 0) | 
 | 10098 | 			return -EINVAL; | 
 | 10099 |   		return 0; | 
 | 10100 |   	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10101 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 10102 | 	spin_lock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10103 | 	if (data) | 
 | 10104 | 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | 
 | 10105 | 	else | 
 | 10106 | 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 10107 | 	spin_unlock_bh(&tp->lock); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10108 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10109 | 	return 0; | 
 | 10110 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10111 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10112 | static int tg3_set_tx_csum(struct net_device *dev, u32 data) | 
 | 10113 | { | 
 | 10114 | 	struct tg3 *tp = netdev_priv(dev); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10115 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10116 | 	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { | 
 | 10117 | 		if (data != 0) | 
 | 10118 | 			return -EINVAL; | 
 | 10119 |   		return 0; | 
 | 10120 |   	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10121 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 10122 | 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 
| Michael Chan | 6460d94 | 2007-07-14 19:07:52 -0700 | [diff] [blame] | 10123 | 		ethtool_op_set_tx_ipv6_csum(dev, data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10124 | 	else | 
| Michael Chan | 9c27dbd | 2006-03-20 22:28:27 -0800 | [diff] [blame] | 10125 | 		ethtool_op_set_tx_csum(dev, data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10126 |  | 
 | 10127 | 	return 0; | 
 | 10128 | } | 
 | 10129 |  | 
| Jeff Garzik | b9f2c04 | 2007-10-03 18:07:32 -0700 | [diff] [blame] | 10130 | static int tg3_get_sset_count (struct net_device *dev, int sset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10131 | { | 
| Jeff Garzik | b9f2c04 | 2007-10-03 18:07:32 -0700 | [diff] [blame] | 10132 | 	switch (sset) { | 
 | 10133 | 	case ETH_SS_TEST: | 
 | 10134 | 		return TG3_NUM_TEST; | 
 | 10135 | 	case ETH_SS_STATS: | 
 | 10136 | 		return TG3_NUM_STATS; | 
 | 10137 | 	default: | 
 | 10138 | 		return -EOPNOTSUPP; | 
 | 10139 | 	} | 
| Michael Chan | 4cafd3f | 2005-05-29 14:56:34 -0700 | [diff] [blame] | 10140 | } | 
 | 10141 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10142 | static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) | 
 | 10143 | { | 
 | 10144 | 	switch (stringset) { | 
 | 10145 | 	case ETH_SS_STATS: | 
 | 10146 | 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); | 
 | 10147 | 		break; | 
| Michael Chan | 4cafd3f | 2005-05-29 14:56:34 -0700 | [diff] [blame] | 10148 | 	case ETH_SS_TEST: | 
 | 10149 | 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); | 
 | 10150 | 		break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10151 | 	default: | 
 | 10152 | 		WARN_ON(1);	/* we need a WARN() */ | 
 | 10153 | 		break; | 
 | 10154 | 	} | 
 | 10155 | } | 
 | 10156 |  | 
| Michael Chan | 4009a93 | 2005-09-05 17:52:54 -0700 | [diff] [blame] | 10157 | static int tg3_phys_id(struct net_device *dev, u32 data) | 
 | 10158 | { | 
 | 10159 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 10160 | 	int i; | 
 | 10161 |  | 
 | 10162 | 	if (!netif_running(tp->dev)) | 
 | 10163 | 		return -EAGAIN; | 
 | 10164 |  | 
 | 10165 | 	if (data == 0) | 
| Stephen Hemminger | 759afc3 | 2008-02-23 19:51:59 -0800 | [diff] [blame] | 10166 | 		data = UINT_MAX / 2; | 
| Michael Chan | 4009a93 | 2005-09-05 17:52:54 -0700 | [diff] [blame] | 10167 |  | 
 | 10168 | 	for (i = 0; i < (data * 2); i++) { | 
 | 10169 | 		if ((i % 2) == 0) | 
 | 10170 | 			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | | 
 | 10171 | 					   LED_CTRL_1000MBPS_ON | | 
 | 10172 | 					   LED_CTRL_100MBPS_ON | | 
 | 10173 | 					   LED_CTRL_10MBPS_ON | | 
 | 10174 | 					   LED_CTRL_TRAFFIC_OVERRIDE | | 
 | 10175 | 					   LED_CTRL_TRAFFIC_BLINK | | 
 | 10176 | 					   LED_CTRL_TRAFFIC_LED); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10177 |  | 
| Michael Chan | 4009a93 | 2005-09-05 17:52:54 -0700 | [diff] [blame] | 10178 | 		else | 
 | 10179 | 			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | | 
 | 10180 | 					   LED_CTRL_TRAFFIC_OVERRIDE); | 
 | 10181 |  | 
 | 10182 | 		if (msleep_interruptible(500)) | 
 | 10183 | 			break; | 
 | 10184 | 	} | 
 | 10185 | 	tw32(MAC_LED_CTRL, tp->led_ctrl); | 
 | 10186 | 	return 0; | 
 | 10187 | } | 
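tg3_phys_id() is reached through ETHTOOL_PHYS_ID (`ethtool -p`): the ioctl argument is the blink time in seconds, and 0 makes this driver blink until interrupted. A hedged user-space sketch, assuming interface "eth0", a 5-second blink, and CAP_NET_ADMIN:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value id = { .cmd = ETHTOOL_PHYS_ID, .data = 5 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&id;

	/* Blocks for roughly id.data seconds while the port LEDs blink. */
	ioctl(fd, SIOCETHTOOL, &ifr);

	close(fd);
	return 0;
}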
 | 10188 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10189 | static void tg3_get_ethtool_stats (struct net_device *dev, | 
 | 10190 | 				   struct ethtool_stats *estats, u64 *tmp_stats) | 
 | 10191 | { | 
 | 10192 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 10193 | 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); | 
 | 10194 | } | 
 | 10195 |  | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10196 | #define NVRAM_TEST_SIZE 0x100 | 
| Matt Carlson | a5767de | 2007-11-12 21:10:58 -0800 | [diff] [blame] | 10197 | #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14 | 
 | 10198 | #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18 | 
 | 10199 | #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10200 | #define NVRAM_SELFBOOT_HW_SIZE 0x20 | 
 | 10201 | #define NVRAM_SELFBOOT_DATA_SIZE 0x1c | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10202 |  | 
 | 10203 | static int tg3_test_nvram(struct tg3 *tp) | 
 | 10204 | { | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 10205 | 	u32 csum, magic; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 10206 | 	__be32 *buf; | 
| Andy Gospodarek | ab0049b | 2007-09-06 20:42:14 +0100 | [diff] [blame] | 10207 | 	int i, j, k, err = 0, size; | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10208 |  | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 10209 | 	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) | 
 | 10210 | 		return 0; | 
 | 10211 |  | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 10212 | 	if (tg3_nvram_read(tp, 0, &magic) != 0) | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10213 | 		return -EIO; | 
 | 10214 |  | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10215 | 	if (magic == TG3_EEPROM_MAGIC) | 
 | 10216 | 		size = NVRAM_TEST_SIZE; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10217 | 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { | 
| Matt Carlson | a5767de | 2007-11-12 21:10:58 -0800 | [diff] [blame] | 10218 | 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == | 
 | 10219 | 		    TG3_EEPROM_SB_FORMAT_1) { | 
 | 10220 | 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) { | 
 | 10221 | 			case TG3_EEPROM_SB_REVISION_0: | 
 | 10222 | 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; | 
 | 10223 | 				break; | 
 | 10224 | 			case TG3_EEPROM_SB_REVISION_2: | 
 | 10225 | 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; | 
 | 10226 | 				break; | 
 | 10227 | 			case TG3_EEPROM_SB_REVISION_3: | 
 | 10228 | 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; | 
 | 10229 | 				break; | 
 | 10230 | 			default: | 
 | 10231 | 				return 0; | 
 | 10232 | 			} | 
 | 10233 | 		} else | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10234 | 			return 0; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10235 | 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) | 
 | 10236 | 		size = NVRAM_SELFBOOT_HW_SIZE; | 
 | 10237 | 	else | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10238 | 		return -EIO; | 
 | 10239 |  | 
 | 10240 | 	buf = kmalloc(size, GFP_KERNEL); | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10241 | 	if (buf == NULL) | 
 | 10242 | 		return -ENOMEM; | 
 | 10243 |  | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10244 | 	err = -EIO; | 
 | 10245 | 	for (i = 0, j = 0; i < size; i += 4, j++) { | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 10246 | 		err = tg3_nvram_read_be32(tp, i, &buf[j]); | 
 | 10247 | 		if (err) | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10248 | 			break; | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10249 | 	} | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10250 | 	if (i < size) | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10251 | 		goto out; | 
 | 10252 |  | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10253 | 	/* Selfboot format */ | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 10254 | 	magic = be32_to_cpu(buf[0]); | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 10255 | 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10256 | 	    TG3_EEPROM_MAGIC_FW) { | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10257 | 		u8 *buf8 = (u8 *) buf, csum8 = 0; | 
 | 10258 |  | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 10259 | 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) == | 
| Matt Carlson | a5767de | 2007-11-12 21:10:58 -0800 | [diff] [blame] | 10260 | 		    TG3_EEPROM_SB_REVISION_2) { | 
 | 10261 | 			/* For rev 2, the csum doesn't include the MBA. */ | 
 | 10262 | 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) | 
 | 10263 | 				csum8 += buf8[i]; | 
 | 10264 | 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) | 
 | 10265 | 				csum8 += buf8[i]; | 
 | 10266 | 		} else { | 
 | 10267 | 			for (i = 0; i < size; i++) | 
 | 10268 | 				csum8 += buf8[i]; | 
 | 10269 | 		} | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10270 |  | 
| Adrian Bunk | ad96b48 | 2006-04-05 22:21:04 -0700 | [diff] [blame] | 10271 | 		if (csum8 == 0) { | 
 | 10272 | 			err = 0; | 
 | 10273 | 			goto out; | 
 | 10274 | 		} | 
 | 10275 |  | 
 | 10276 | 		err = -EIO; | 
 | 10277 | 		goto out; | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 10278 | 	} | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10279 |  | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 10280 | 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10281 | 	    TG3_EEPROM_MAGIC_HW) { | 
 | 10282 | 		u8 data[NVRAM_SELFBOOT_DATA_SIZE]; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 10283 | 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10284 | 		u8 *buf8 = (u8 *) buf; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10285 |  | 
 | 10286 | 		/* Separate the parity bits and the data bytes.  */ | 
 | 10287 | 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { | 
 | 10288 | 			if ((i == 0) || (i == 8)) { | 
 | 10289 | 				int l; | 
 | 10290 | 				u8 msk; | 
 | 10291 |  | 
 | 10292 | 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) | 
 | 10293 | 					parity[k++] = buf8[i] & msk; | 
 | 10294 | 				i++; | 
 | 10295 | 			} | 
 | 10296 | 			else if (i == 16) { | 
 | 10297 | 				int l; | 
 | 10298 | 				u8 msk; | 
 | 10299 |  | 
 | 10300 | 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) | 
 | 10301 | 					parity[k++] = buf8[i] & msk; | 
 | 10302 | 				i++; | 
 | 10303 |  | 
 | 10304 | 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) | 
 | 10305 | 					parity[k++] = buf8[i] & msk; | 
 | 10306 | 				i++; | 
 | 10307 | 			} | 
 | 10308 | 			data[j++] = buf8[i]; | 
 | 10309 | 		} | 
 | 10310 |  | 
 | 10311 | 		err = -EIO; | 
 | 10312 | 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { | 
 | 10313 | 			u8 hw8 = hweight8(data[i]); | 
 | 10314 |  | 
 | 10315 | 			if ((hw8 & 0x1) && parity[i]) | 
 | 10316 | 				goto out; | 
 | 10317 | 			else if (!(hw8 & 0x1) && !parity[i]) | 
 | 10318 | 				goto out; | 
 | 10319 | 		} | 
 | 10320 | 		err = 0; | 
 | 10321 | 		goto out; | 
 | 10322 | 	} | 
 | 10323 |  | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10324 | 	/* Bootstrap checksum at offset 0x10 */ | 
 | 10325 | 	csum = calc_crc((unsigned char *) buf, 0x10); | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 10326 | 	if (csum != be32_to_cpu(buf[0x10/4])) | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10327 | 		goto out; | 
 | 10328 |  | 
 | 10329 | 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */ | 
 | 10330 | 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 10331 | 	if (csum != be32_to_cpu(buf[0xfc/4])) | 
 | 10332 | 		goto out; | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10333 |  | 
 | 10334 | 	err = 0; | 
 | 10335 |  | 
 | 10336 | out: | 
 | 10337 | 	kfree(buf); | 
 | 10338 | 	return err; | 
 | 10339 | } | 
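For the hardware-format selfboot image, the loop above enforces odd parity per data byte: the stored parity bit must be set exactly when the data byte has an even number of one bits. A small stand-alone model of that rule, using the compiler's __builtin_popcount() where the driver uses hweight8() (illustrative only, not driver code):

#include <stdio.h>

static int selfboot_parity_ok(unsigned char data, unsigned char parity_bit)
{
	int ones = __builtin_popcount(data);	/* hweight8() equivalent */

	/* Valid only if the parity bit makes the total bit count odd. */
	return (ones & 1) ? (parity_bit == 0) : (parity_bit != 0);
}

int main(void)
{
	printf("%d\n", selfboot_parity_ok(0x0f, 1));	/* even weight -> parity must be set */
	printf("%d\n", selfboot_parity_ok(0x07, 0));	/* odd weight  -> parity must be clear */
	return 0;
}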
 | 10340 |  | 
| Michael Chan | ca43007 | 2005-05-29 14:57:23 -0700 | [diff] [blame] | 10341 | #define TG3_SERDES_TIMEOUT_SEC	2 | 
 | 10342 | #define TG3_COPPER_TIMEOUT_SEC	6 | 
 | 10343 |  | 
 | 10344 | static int tg3_test_link(struct tg3 *tp) | 
 | 10345 | { | 
 | 10346 | 	int i, max; | 
 | 10347 |  | 
 | 10348 | 	if (!netif_running(tp->dev)) | 
 | 10349 | 		return -ENODEV; | 
 | 10350 |  | 
| Michael Chan | 4c98748 | 2005-09-05 17:52:38 -0700 | [diff] [blame] | 10351 | 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) | 
| Michael Chan | ca43007 | 2005-05-29 14:57:23 -0700 | [diff] [blame] | 10352 | 		max = TG3_SERDES_TIMEOUT_SEC; | 
 | 10353 | 	else | 
 | 10354 | 		max = TG3_COPPER_TIMEOUT_SEC; | 
 | 10355 |  | 
 | 10356 | 	for (i = 0; i < max; i++) { | 
 | 10357 | 		if (netif_carrier_ok(tp->dev)) | 
 | 10358 | 			return 0; | 
 | 10359 |  | 
 | 10360 | 		if (msleep_interruptible(1000)) | 
 | 10361 | 			break; | 
 | 10362 | 	} | 
 | 10363 |  | 
 | 10364 | 	return -EIO; | 
 | 10365 | } | 
 | 10366 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10367 | /* Only test the commonly used registers */ | 
| David S. Miller | 30ca3e3 | 2006-03-20 23:02:36 -0800 | [diff] [blame] | 10368 | static int tg3_test_registers(struct tg3 *tp) | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10369 | { | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10370 | 	int i, is_5705, is_5750; | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10371 | 	u32 offset, read_mask, write_mask, val, save_val, read_val; | 
 | 10372 | 	static struct { | 
 | 10373 | 		u16 offset; | 
 | 10374 | 		u16 flags; | 
 | 10375 | #define TG3_FL_5705	0x1 | 
 | 10376 | #define TG3_FL_NOT_5705	0x2 | 
 | 10377 | #define TG3_FL_NOT_5788	0x4 | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10378 | #define TG3_FL_NOT_5750	0x8 | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10379 | 		u32 read_mask; | 
 | 10380 | 		u32 write_mask; | 
 | 10381 | 	} reg_tbl[] = { | 
 | 10382 | 		/* MAC Control Registers */ | 
 | 10383 | 		{ MAC_MODE, TG3_FL_NOT_5705, | 
 | 10384 | 			0x00000000, 0x00ef6f8c }, | 
 | 10385 | 		{ MAC_MODE, TG3_FL_5705, | 
 | 10386 | 			0x00000000, 0x01ef6b8c }, | 
 | 10387 | 		{ MAC_STATUS, TG3_FL_NOT_5705, | 
 | 10388 | 			0x03800107, 0x00000000 }, | 
 | 10389 | 		{ MAC_STATUS, TG3_FL_5705, | 
 | 10390 | 			0x03800100, 0x00000000 }, | 
 | 10391 | 		{ MAC_ADDR_0_HIGH, 0x0000, | 
 | 10392 | 			0x00000000, 0x0000ffff }, | 
 | 10393 | 		{ MAC_ADDR_0_LOW, 0x0000, | 
 | 10394 | 		       	0x00000000, 0xffffffff }, | 
 | 10395 | 		{ MAC_RX_MTU_SIZE, 0x0000, | 
 | 10396 | 			0x00000000, 0x0000ffff }, | 
 | 10397 | 		{ MAC_TX_MODE, 0x0000, | 
 | 10398 | 			0x00000000, 0x00000070 }, | 
 | 10399 | 		{ MAC_TX_LENGTHS, 0x0000, | 
 | 10400 | 			0x00000000, 0x00003fff }, | 
 | 10401 | 		{ MAC_RX_MODE, TG3_FL_NOT_5705, | 
 | 10402 | 			0x00000000, 0x000007fc }, | 
 | 10403 | 		{ MAC_RX_MODE, TG3_FL_5705, | 
 | 10404 | 			0x00000000, 0x000007dc }, | 
 | 10405 | 		{ MAC_HASH_REG_0, 0x0000, | 
 | 10406 | 			0x00000000, 0xffffffff }, | 
 | 10407 | 		{ MAC_HASH_REG_1, 0x0000, | 
 | 10408 | 			0x00000000, 0xffffffff }, | 
 | 10409 | 		{ MAC_HASH_REG_2, 0x0000, | 
 | 10410 | 			0x00000000, 0xffffffff }, | 
 | 10411 | 		{ MAC_HASH_REG_3, 0x0000, | 
 | 10412 | 			0x00000000, 0xffffffff }, | 
 | 10413 |  | 
 | 10414 | 		/* Receive Data and Receive BD Initiator Control Registers. */ | 
 | 10415 | 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, | 
 | 10416 | 			0x00000000, 0xffffffff }, | 
 | 10417 | 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, | 
 | 10418 | 			0x00000000, 0xffffffff }, | 
 | 10419 | 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, | 
 | 10420 | 			0x00000000, 0x00000003 }, | 
 | 10421 | 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, | 
 | 10422 | 			0x00000000, 0xffffffff }, | 
 | 10423 | 		{ RCVDBDI_STD_BD+0, 0x0000, | 
 | 10424 | 			0x00000000, 0xffffffff }, | 
 | 10425 | 		{ RCVDBDI_STD_BD+4, 0x0000, | 
 | 10426 | 			0x00000000, 0xffffffff }, | 
 | 10427 | 		{ RCVDBDI_STD_BD+8, 0x0000, | 
 | 10428 | 			0x00000000, 0xffff0002 }, | 
 | 10429 | 		{ RCVDBDI_STD_BD+0xc, 0x0000, | 
 | 10430 | 			0x00000000, 0xffffffff }, | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10431 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10432 | 		/* Receive BD Initiator Control Registers. */ | 
 | 10433 | 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705, | 
 | 10434 | 			0x00000000, 0xffffffff }, | 
 | 10435 | 		{ RCVBDI_STD_THRESH, TG3_FL_5705, | 
 | 10436 | 			0x00000000, 0x000003ff }, | 
 | 10437 | 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, | 
 | 10438 | 			0x00000000, 0xffffffff }, | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10439 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10440 | 		/* Host Coalescing Control Registers. */ | 
 | 10441 | 		{ HOSTCC_MODE, TG3_FL_NOT_5705, | 
 | 10442 | 			0x00000000, 0x00000004 }, | 
 | 10443 | 		{ HOSTCC_MODE, TG3_FL_5705, | 
 | 10444 | 			0x00000000, 0x000000f6 }, | 
 | 10445 | 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, | 
 | 10446 | 			0x00000000, 0xffffffff }, | 
 | 10447 | 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705, | 
 | 10448 | 			0x00000000, 0x000003ff }, | 
 | 10449 | 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, | 
 | 10450 | 			0x00000000, 0xffffffff }, | 
 | 10451 | 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705, | 
 | 10452 | 			0x00000000, 0x000003ff }, | 
 | 10453 | 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, | 
 | 10454 | 			0x00000000, 0xffffffff }, | 
 | 10455 | 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, | 
 | 10456 | 			0x00000000, 0x000000ff }, | 
 | 10457 | 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, | 
 | 10458 | 			0x00000000, 0xffffffff }, | 
 | 10459 | 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, | 
 | 10460 | 			0x00000000, 0x000000ff }, | 
 | 10461 | 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, | 
 | 10462 | 			0x00000000, 0xffffffff }, | 
 | 10463 | 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, | 
 | 10464 | 			0x00000000, 0xffffffff }, | 
 | 10465 | 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, | 
 | 10466 | 			0x00000000, 0xffffffff }, | 
 | 10467 | 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, | 
 | 10468 | 			0x00000000, 0x000000ff }, | 
 | 10469 | 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, | 
 | 10470 | 			0x00000000, 0xffffffff }, | 
 | 10471 | 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, | 
 | 10472 | 			0x00000000, 0x000000ff }, | 
 | 10473 | 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, | 
 | 10474 | 			0x00000000, 0xffffffff }, | 
 | 10475 | 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, | 
 | 10476 | 			0x00000000, 0xffffffff }, | 
 | 10477 | 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, | 
 | 10478 | 			0x00000000, 0xffffffff }, | 
 | 10479 | 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, | 
 | 10480 | 			0x00000000, 0xffffffff }, | 
 | 10481 | 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, | 
 | 10482 | 			0x00000000, 0xffffffff }, | 
 | 10483 | 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, | 
 | 10484 | 			0xffffffff, 0x00000000 }, | 
 | 10485 | 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, | 
 | 10486 | 			0xffffffff, 0x00000000 }, | 
 | 10487 |  | 
 | 10488 | 		/* Buffer Manager Control Registers. */ | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10489 | 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10490 | 			0x00000000, 0x007fff80 }, | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10491 | 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10492 | 			0x00000000, 0x007fffff }, | 
 | 10493 | 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000, | 
 | 10494 | 			0x00000000, 0x0000003f }, | 
 | 10495 | 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000, | 
 | 10496 | 			0x00000000, 0x000001ff }, | 
 | 10497 | 		{ BUFMGR_MB_HIGH_WATER, 0x0000, | 
 | 10498 | 			0x00000000, 0x000001ff }, | 
 | 10499 | 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, | 
 | 10500 | 			0xffffffff, 0x00000000 }, | 
 | 10501 | 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, | 
 | 10502 | 			0xffffffff, 0x00000000 }, | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10503 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10504 | 		/* Mailbox Registers */ | 
 | 10505 | 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, | 
 | 10506 | 			0x00000000, 0x000001ff }, | 
 | 10507 | 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, | 
 | 10508 | 			0x00000000, 0x000001ff }, | 
 | 10509 | 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, | 
 | 10510 | 			0x00000000, 0x000007ff }, | 
 | 10511 | 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, | 
 | 10512 | 			0x00000000, 0x000001ff }, | 
 | 10513 |  | 
 | 10514 | 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 }, | 
 | 10515 | 	}; | 
 | 10516 |  | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10517 | 	is_5705 = is_5750 = 0; | 
 | 10518 | 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10519 | 		is_5705 = 1; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10520 | 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 
 | 10521 | 			is_5750 = 1; | 
 | 10522 | 	} | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10523 |  | 
 | 10524 | 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) { | 
 | 10525 | 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) | 
 | 10526 | 			continue; | 
 | 10527 |  | 
 | 10528 | 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) | 
 | 10529 | 			continue; | 
 | 10530 |  | 
 | 10531 | 		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 
 | 10532 | 		    (reg_tbl[i].flags & TG3_FL_NOT_5788)) | 
 | 10533 | 			continue; | 
 | 10534 |  | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10535 | 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) | 
 | 10536 | 			continue; | 
 | 10537 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10538 | 		offset = (u32) reg_tbl[i].offset; | 
 | 10539 | 		read_mask = reg_tbl[i].read_mask; | 
 | 10540 | 		write_mask = reg_tbl[i].write_mask; | 
 | 10541 |  | 
 | 10542 | 		/* Save the original register content */ | 
 | 10543 | 		save_val = tr32(offset); | 
 | 10544 |  | 
 | 10545 | 		/* Determine the read-only value. */ | 
 | 10546 | 		read_val = save_val & read_mask; | 
 | 10547 |  | 
 | 10548 | 		/* Write zero to the register, then make sure the read-only bits | 
 | 10549 | 		 * are not changed and the read/write bits are all zeros. | 
 | 10550 | 		 */ | 
 | 10551 | 		tw32(offset, 0); | 
 | 10552 |  | 
 | 10553 | 		val = tr32(offset); | 
 | 10554 |  | 
 | 10555 | 		/* Test the read-only and read/write bits. */ | 
 | 10556 | 		if (((val & read_mask) != read_val) || (val & write_mask)) | 
 | 10557 | 			goto out; | 
 | 10558 |  | 
 | 10559 | 		/* Write ones to all the bits defined by RdMask and WrMask, then | 
 | 10560 | 		 * make sure the read-only bits are not changed and the | 
 | 10561 | 		 * read/write bits are all ones. | 
 | 10562 | 		 */ | 
 | 10563 | 		tw32(offset, read_mask | write_mask); | 
 | 10564 |  | 
 | 10565 | 		val = tr32(offset); | 
 | 10566 |  | 
 | 10567 | 		/* Test the read-only bits. */ | 
 | 10568 | 		if ((val & read_mask) != read_val) | 
 | 10569 | 			goto out; | 
 | 10570 |  | 
 | 10571 | 		/* Test the read/write bits. */ | 
 | 10572 | 		if ((val & write_mask) != write_mask) | 
 | 10573 | 			goto out; | 
 | 10574 |  | 
 | 10575 | 		tw32(offset, save_val); | 
 | 10576 | 	} | 
 | 10577 |  | 
 | 10578 | 	return 0; | 
 | 10579 |  | 
 | 10580 | out: | 
| Michael Chan | 9f88f29 | 2006-12-07 00:22:54 -0800 | [diff] [blame] | 10581 | 	if (netif_msg_hw(tp)) | 
 | 10582 | 		printk(KERN_ERR PFX "Register test failed at offset %x\n", | 
 | 10583 | 		       offset); | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10584 | 	tw32(offset, save_val); | 
 | 10585 | 	return -EIO; | 
 | 10586 | } | 
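Each table entry above is probed twice, once with zero written and once with read_mask|write_mask written; in both probes the read-only bits must keep their saved value while the read/write bits must follow what was written. The same invariant restated as a small pure function over the two probe results (a sketch with a hypothetical register layout, not driver code):

#include <stdio.h>

static int reg_masks_ok(unsigned int read_val, unsigned int read_mask,
			unsigned int write_mask,
			unsigned int val_after_zero, unsigned int val_after_ones)
{
	/* After writing 0: read-only bits keep their value, r/w bits read 0. */
	if ((val_after_zero & read_mask) != read_val ||
	    (val_after_zero & write_mask))
		return 0;

	/* After writing read_mask|write_mask: read-only bits keep their
	 * value, r/w bits read back as all ones.
	 */
	if ((val_after_ones & read_mask) != read_val ||
	    (val_after_ones & write_mask) != write_mask)
		return 0;

	return 1;
}

int main(void)
{
	/* Hypothetical register: top byte read-only (currently 0x03),
	 * low 12 bits read/write -- behaves correctly in both probes.
	 */
	printf("%d\n", reg_masks_ok(0x03000000, 0xff000000, 0x00000fff,
				    0x03000000, 0x03000fff));
	return 0;
}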
 | 10587 |  | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10588 | static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) | 
 | 10589 | { | 
| Arjan van de Ven | f71e130 | 2006-03-03 21:33:57 -0500 | [diff] [blame] | 10590 | 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10591 | 	int i; | 
 | 10592 | 	u32 j; | 
 | 10593 |  | 
| Alejandro Martinez Ruiz | e9edda6 | 2007-10-15 03:37:43 +0200 | [diff] [blame] | 10594 | 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10595 | 		for (j = 0; j < len; j += 4) { | 
 | 10596 | 			u32 val; | 
 | 10597 |  | 
 | 10598 | 			tg3_write_mem(tp, offset + j, test_pattern[i]); | 
 | 10599 | 			tg3_read_mem(tp, offset + j, &val); | 
 | 10600 | 			if (val != test_pattern[i]) | 
 | 10601 | 				return -EIO; | 
 | 10602 | 		} | 
 | 10603 | 	} | 
 | 10604 | 	return 0; | 
 | 10605 | } | 
 | 10606 |  | 
 | 10607 | static int tg3_test_memory(struct tg3 *tp) | 
 | 10608 | { | 
 | 10609 | 	static struct mem_entry { | 
 | 10610 | 		u32 offset; | 
 | 10611 | 		u32 len; | 
 | 10612 | 	} mem_tbl_570x[] = { | 
| Michael Chan | 3869019 | 2005-12-19 16:27:28 -0800 | [diff] [blame] | 10613 | 		{ 0x00000000, 0x00b50}, | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10614 | 		{ 0x00002000, 0x1c000}, | 
 | 10615 | 		{ 0xffffffff, 0x00000} | 
 | 10616 | 	}, mem_tbl_5705[] = { | 
 | 10617 | 		{ 0x00000100, 0x0000c}, | 
 | 10618 | 		{ 0x00000200, 0x00008}, | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10619 | 		{ 0x00004000, 0x00800}, | 
 | 10620 | 		{ 0x00006000, 0x01000}, | 
 | 10621 | 		{ 0x00008000, 0x02000}, | 
 | 10622 | 		{ 0x00010000, 0x0e000}, | 
 | 10623 | 		{ 0xffffffff, 0x00000} | 
| Michael Chan | 79f4d13 | 2006-03-20 22:28:57 -0800 | [diff] [blame] | 10624 | 	}, mem_tbl_5755[] = { | 
 | 10625 | 		{ 0x00000200, 0x00008}, | 
 | 10626 | 		{ 0x00004000, 0x00800}, | 
 | 10627 | 		{ 0x00006000, 0x00800}, | 
 | 10628 | 		{ 0x00008000, 0x02000}, | 
 | 10629 | 		{ 0x00010000, 0x0c000}, | 
 | 10630 | 		{ 0xffffffff, 0x00000} | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10631 | 	}, mem_tbl_5906[] = { | 
 | 10632 | 		{ 0x00000200, 0x00008}, | 
 | 10633 | 		{ 0x00004000, 0x00400}, | 
 | 10634 | 		{ 0x00006000, 0x00400}, | 
 | 10635 | 		{ 0x00008000, 0x01000}, | 
 | 10636 | 		{ 0x00010000, 0x01000}, | 
 | 10637 | 		{ 0xffffffff, 0x00000} | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10638 | 	}; | 
 | 10639 | 	struct mem_entry *mem_tbl; | 
 | 10640 | 	int err = 0; | 
 | 10641 | 	int i; | 
 | 10642 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 10643 | 	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 
 | 10644 | 		mem_tbl = mem_tbl_5755; | 
 | 10645 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
 | 10646 | 		mem_tbl = mem_tbl_5906; | 
 | 10647 | 	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 
 | 10648 | 		mem_tbl = mem_tbl_5705; | 
 | 10649 | 	else | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10650 | 		mem_tbl = mem_tbl_570x; | 
 | 10651 |  | 
 | 10652 | 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { | 
 | 10653 | 		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset, | 
 | 10654 | 		    mem_tbl[i].len)) != 0) | 
 | 10655 | 			break; | 
 | 10656 | 	} | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10657 |  | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10658 | 	return err; | 
 | 10659 | } | 
 | 10660 |  | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10661 | #define TG3_MAC_LOOPBACK	0 | 
 | 10662 | #define TG3_PHY_LOOPBACK	1 | 
 | 10663 |  | 
 | 10664 | static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10665 | { | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10666 | 	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key; | 
| Matt Carlson | fd2ce37 | 2009-09-01 12:51:13 +0000 | [diff] [blame] | 10667 | 	u32 desc_idx, coal_now; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10668 | 	struct sk_buff *skb, *rx_skb; | 
 | 10669 | 	u8 *tx_data; | 
 | 10670 | 	dma_addr_t map; | 
 | 10671 | 	int num_pkts, tx_len, rx_len, i, err; | 
 | 10672 | 	struct tg3_rx_buffer_desc *desc; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 10673 | 	struct tg3_napi *tnapi, *rnapi; | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 10674 | 	struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10675 |  | 
| Matt Carlson | 0c1d0e2 | 2009-09-01 13:16:33 +0000 | [diff] [blame] | 10676 | 	if (tp->irq_cnt > 1) { | 
 | 10677 | 		tnapi = &tp->napi[1]; | 
 | 10678 | 		rnapi = &tp->napi[1]; | 
 | 10679 | 	} else { | 
 | 10680 | 		tnapi = &tp->napi[0]; | 
 | 10681 | 		rnapi = &tp->napi[0]; | 
 | 10682 | 	} | 
| Matt Carlson | fd2ce37 | 2009-09-01 12:51:13 +0000 | [diff] [blame] | 10683 | 	coal_now = tnapi->coal_now | rnapi->coal_now; | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 10684 |  | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10685 | 	if (loopback_mode == TG3_MAC_LOOPBACK) { | 
| Michael Chan | c94e394 | 2005-09-27 12:12:42 -0700 | [diff] [blame] | 10686 | 		/* HW errata - mac loopback fails in some cases on 5780. | 
 | 10687 | 		 * Normal traffic and PHY loopback are not affected by | 
 | 10688 | 		 * errata. | 
 | 10689 | 		 */ | 
 | 10690 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) | 
 | 10691 | 			return 0; | 
 | 10692 |  | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10693 | 		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 10694 | 			   MAC_MODE_PORT_INT_LPBACK; | 
 | 10695 | 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 10696 | 			mac_mode |= MAC_MODE_LINK_POLARITY; | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 10697 | 		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) | 
 | 10698 | 			mac_mode |= MAC_MODE_PORT_MODE_MII; | 
 | 10699 | 		else | 
 | 10700 | 			mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10701 | 		tw32(MAC_MODE, mac_mode); | 
 | 10702 | 	} else if (loopback_mode == TG3_PHY_LOOPBACK) { | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 10703 | 		u32 val; | 
 | 10704 |  | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 10705 | 		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 
 | 10706 | 			tg3_phy_fet_toggle_apd(tp, false); | 
| Michael Chan | 5d64ad3 | 2006-12-07 00:19:40 -0800 | [diff] [blame] | 10707 | 			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100; | 
 | 10708 | 		} else | 
 | 10709 | 			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 10710 |  | 
| Matt Carlson | 9ef8ca9 | 2007-07-11 19:48:29 -0700 | [diff] [blame] | 10711 | 		tg3_phy_toggle_automdix(tp, 0); | 
 | 10712 |  | 
| Michael Chan | 3f7045c | 2006-09-27 16:02:29 -0700 | [diff] [blame] | 10713 | 		tg3_writephy(tp, MII_BMCR, val); | 
| Michael Chan | c94e394 | 2005-09-27 12:12:42 -0700 | [diff] [blame] | 10714 | 		udelay(40); | 
| Michael Chan | 5d64ad3 | 2006-12-07 00:19:40 -0800 | [diff] [blame] | 10715 |  | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 10716 | 		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 10717 | 		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | 
 | 10718 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
 | 10719 | 				tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800); | 
| Michael Chan | 5d64ad3 | 2006-12-07 00:19:40 -0800 | [diff] [blame] | 10720 | 			mac_mode |= MAC_MODE_PORT_MODE_MII; | 
 | 10721 | 		} else | 
 | 10722 | 			mac_mode |= MAC_MODE_PORT_MODE_GMII; | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 10723 |  | 
| Michael Chan | c94e394 | 2005-09-27 12:12:42 -0700 | [diff] [blame] | 10724 | 		/* reset to prevent losing 1st rx packet intermittently */ | 
 | 10725 | 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { | 
 | 10726 | 			tw32_f(MAC_RX_MODE, RX_MODE_RESET); | 
 | 10727 | 			udelay(10); | 
 | 10728 | 			tw32_f(MAC_RX_MODE, tp->rx_mode); | 
 | 10729 | 		} | 
| Matt Carlson | e8f3f6c | 2007-07-11 19:47:55 -0700 | [diff] [blame] | 10730 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { | 
 | 10731 | 			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) | 
 | 10732 | 				mac_mode &= ~MAC_MODE_LINK_POLARITY; | 
 | 10733 | 			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) | 
 | 10734 | 				mac_mode |= MAC_MODE_LINK_POLARITY; | 
| Michael Chan | ff18ff0 | 2006-03-27 23:17:27 -0800 | [diff] [blame] | 10735 | 			tg3_writephy(tp, MII_TG3_EXT_CTRL, | 
 | 10736 | 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE); | 
 | 10737 | 		} | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10738 | 		tw32(MAC_MODE, mac_mode); | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10739 | 	} | 
 | 10740 | 	else | 
 | 10741 | 		return -EINVAL; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10742 |  | 
 | 10743 | 	err = -EIO; | 
 | 10744 |  | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10745 | 	tx_len = 1514; | 
| David S. Miller | a20e9c6 | 2006-07-31 22:38:16 -0700 | [diff] [blame] | 10746 | 	skb = netdev_alloc_skb(tp->dev, tx_len); | 
| Jesper Juhl | a50bb7b | 2006-05-09 23:14:35 -0700 | [diff] [blame] | 10747 | 	if (!skb) | 
 | 10748 | 		return -ENOMEM; | 
 | 10749 |  | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10750 | 	tx_data = skb_put(skb, tx_len); | 
 | 10751 | 	memcpy(tx_data, tp->dev->dev_addr, 6); | 
 | 10752 | 	memset(tx_data + 6, 0x0, 8); | 
 | 10753 |  | 
 | 10754 | 	tw32(MAC_RX_MTU_SIZE, tx_len + 4); | 
 | 10755 |  | 
 | 10756 | 	for (i = 14; i < tx_len; i++) | 
 | 10757 | 		tx_data[i] = (u8) (i & 0xff); | 
 | 10758 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 10759 | 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); | 
 | 10760 | 	if (pci_dma_mapping_error(tp->pdev, map)) { | 
| Matt Carlson | a21771d | 2009-11-02 14:25:31 +0000 | [diff] [blame] | 10761 | 		dev_kfree_skb(skb); | 
 | 10762 | 		return -EIO; | 
 | 10763 | 	} | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10764 |  | 
 | 10765 | 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 
| Matt Carlson | fd2ce37 | 2009-09-01 12:51:13 +0000 | [diff] [blame] | 10766 | 	       rnapi->coal_now); | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10767 |  | 
 | 10768 | 	udelay(10); | 
 | 10769 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 10770 | 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10771 |  | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10772 | 	num_pkts = 0; | 
 | 10773 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 10774 | 	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10775 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 10776 | 	tnapi->tx_prod++; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10777 | 	num_pkts++; | 
 | 10778 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 10779 | 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); | 
 | 10780 | 	tr32_mailbox(tnapi->prodmbox); | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10781 |  | 
 | 10782 | 	udelay(10); | 
 | 10783 |  | 
| Matt Carlson | 303fc92 | 2009-11-02 14:27:34 +0000 | [diff] [blame] | 10784 | 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */ | 
 | 10785 | 	for (i = 0; i < 35; i++) { | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10786 | 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 
| Matt Carlson | fd2ce37 | 2009-09-01 12:51:13 +0000 | [diff] [blame] | 10787 | 		       coal_now); | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10788 |  | 
 | 10789 | 		udelay(10); | 
 | 10790 |  | 
| Matt Carlson | 898a56f | 2009-08-28 14:02:40 +0000 | [diff] [blame] | 10791 | 		tx_idx = tnapi->hw_status->idx[0].tx_consumer; | 
 | 10792 | 		rx_idx = rnapi->hw_status->idx[0].rx_producer; | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 10793 | 		if ((tx_idx == tnapi->tx_prod) && | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10794 | 		    (rx_idx == (rx_start_idx + num_pkts))) | 
 | 10795 | 			break; | 
 | 10796 | 	} | 
 | 10797 |  | 
| Alexander Duyck | f4188d8 | 2009-12-02 16:48:38 +0000 | [diff] [blame] | 10798 | 	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10799 | 	dev_kfree_skb(skb); | 
 | 10800 |  | 
| Matt Carlson | f3f3f27 | 2009-08-28 14:03:21 +0000 | [diff] [blame] | 10801 | 	if (tx_idx != tnapi->tx_prod) | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10802 | 		goto out; | 
 | 10803 |  | 
 | 10804 | 	if (rx_idx != rx_start_idx + num_pkts) | 
 | 10805 | 		goto out; | 
 | 10806 |  | 
| Matt Carlson | 7233448 | 2009-08-28 14:03:01 +0000 | [diff] [blame] | 10807 | 	desc = &rnapi->rx_rcb[rx_start_idx]; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10808 | 	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 
 | 10809 | 	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 
 | 10810 | 	if (opaque_key != RXD_OPAQUE_RING_STD) | 
 | 10811 | 		goto out; | 
 | 10812 |  | 
 | 10813 | 	if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 
 | 10814 | 	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) | 
 | 10815 | 		goto out; | 
 | 10816 |  | 
 | 10817 | 	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; | 
 | 10818 | 	if (rx_len != tx_len) | 
 | 10819 | 		goto out; | 
 | 10820 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 10821 | 	rx_skb = tpr->rx_std_buffers[desc_idx].skb; | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10822 |  | 
| Matt Carlson | 21f581a | 2009-08-28 14:00:25 +0000 | [diff] [blame] | 10823 | 	map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10824 | 	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); | 
 | 10825 |  | 
 | 10826 | 	for (i = 14; i < tx_len; i++) { | 
 | 10827 | 		if (*(rx_skb->data + i) != (u8) (i & 0xff)) | 
 | 10828 | 			goto out; | 
 | 10829 | 	} | 
 | 10830 | 	err = 0; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 10831 |  | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10832 | 	/* tg3_free_rings will unmap and free the rx_skb */ | 
 | 10833 | out: | 
 | 10834 | 	return err; | 
 | 10835 | } | 
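The loopback test frame built above is deliberately simple: the first 6 bytes carry the device's own MAC address as destination, the next 8 bytes (source address and type) stay zero, and every byte from offset 14 onward holds its own offset modulo 256, so the receive side can verify the payload byte-for-byte. A stand-alone sketch of that frame layout and check (the MAC address is an arbitrary example, not taken from any device):

#include <stdio.h>
#include <string.h>

#define LB_FRAME_LEN 1514	/* matches tx_len in tg3_run_loopback() */

static void build_loopback_frame(unsigned char *buf,
				 const unsigned char *dev_addr)
{
	int i;

	memcpy(buf, dev_addr, 6);		/* destination = own MAC address */
	memset(buf + 6, 0, 8);			/* source MAC + type left as zero */
	for (i = 14; i < LB_FRAME_LEN; i++)	/* deterministic counting payload */
		buf[i] = (unsigned char)(i & 0xff);
}

static int loopback_frame_ok(const unsigned char *rx, int rx_len)
{
	int i;

	if (rx_len != LB_FRAME_LEN)		/* driver compares rx_len to tx_len */
		return 0;
	for (i = 14; i < LB_FRAME_LEN; i++)	/* payload must come back intact */
		if (rx[i] != (unsigned char)(i & 0xff))
			return 0;
	return 1;
}

int main(void)
{
	static unsigned char frame[LB_FRAME_LEN];
	const unsigned char mac[6] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 };

	build_loopback_frame(frame, mac);
	printf("frame ok: %d\n", loopback_frame_ok(frame, LB_FRAME_LEN));
	return 0;
}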
 | 10836 |  | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10837 | #define TG3_MAC_LOOPBACK_FAILED		1 | 
 | 10838 | #define TG3_PHY_LOOPBACK_FAILED		2 | 
 | 10839 | #define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\ | 
 | 10840 | 					 TG3_PHY_LOOPBACK_FAILED) | 
 | 10841 |  | 
 | 10842 | static int tg3_test_loopback(struct tg3 *tp) | 
 | 10843 | { | 
 | 10844 | 	int err = 0; | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 10845 | 	u32 cpmuctrl = 0; | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10846 |  | 
 | 10847 | 	if (!netif_running(tp->dev)) | 
 | 10848 | 		return TG3_LOOPBACK_FAILED; | 
 | 10849 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 10850 | 	err = tg3_reset_hw(tp, 1); | 
 | 10851 | 	if (err) | 
 | 10852 | 		return TG3_LOOPBACK_FAILED; | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10853 |  | 
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 10854 | 	/* Turn off gphy autopowerdown. */ | 
 | 10855 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) | 
 | 10856 | 		tg3_phy_toggle_apd(tp, false); | 
 | 10857 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 10858 | 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 10859 | 		int i; | 
 | 10860 | 		u32 status; | 
 | 10861 |  | 
 | 10862 | 		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER); | 
 | 10863 |  | 
 | 10864 | 		/* Wait for up to 40 microseconds to acquire lock. */ | 
 | 10865 | 		for (i = 0; i < 4; i++) { | 
 | 10866 | 			status = tr32(TG3_CPMU_MUTEX_GNT); | 
 | 10867 | 			if (status == CPMU_MUTEX_GNT_DRIVER) | 
 | 10868 | 				break; | 
 | 10869 | 			udelay(10); | 
 | 10870 | 		} | 
 | 10871 |  | 
 | 10872 | 		if (status != CPMU_MUTEX_GNT_DRIVER) | 
 | 10873 | 			return TG3_LOOPBACK_FAILED; | 
 | 10874 |  | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 10875 | 		/* Turn off link-based power management. */ | 
| Matt Carlson | e875093 | 2007-11-12 21:11:51 -0800 | [diff] [blame] | 10876 | 		cpmuctrl = tr32(TG3_CPMU_CTRL); | 
| Matt Carlson | 109115e | 2008-05-02 16:48:59 -0700 | [diff] [blame] | 10877 | 		tw32(TG3_CPMU_CTRL, | 
 | 10878 | 		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE | | 
 | 10879 | 				  CPMU_CTRL_LINK_AWARE_MODE)); | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 10880 | 	} | 
 | 10881 |  | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10882 | 	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 
 | 10883 | 		err |= TG3_MAC_LOOPBACK_FAILED; | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 10884 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 10885 | 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 10886 | 		tw32(TG3_CPMU_CTRL, cpmuctrl); | 
 | 10887 |  | 
 | 10888 | 		/* Release the mutex */ | 
 | 10889 | 		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); | 
 | 10890 | 	} | 
 | 10891 |  | 
| Matt Carlson | dd47700 | 2008-05-25 23:45:58 -0700 | [diff] [blame] | 10892 | 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) && | 
 | 10893 | 	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10894 | 		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) | 
 | 10895 | 			err |= TG3_PHY_LOOPBACK_FAILED; | 
 | 10896 | 	} | 
 | 10897 |  | 
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 10898 | 	/* Re-enable gphy autopowerdown. */ | 
 | 10899 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD) | 
 | 10900 | 		tg3_phy_toggle_apd(tp, true); | 
 | 10901 |  | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10902 | 	return err; | 
 | 10903 | } | 
 | 10904 |  | 
| Michael Chan | 4cafd3f | 2005-05-29 14:56:34 -0700 | [diff] [blame] | 10905 | static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | 
 | 10906 | 			  u64 *data) | 
 | 10907 | { | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10908 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 10909 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 10910 | 	if (tp->link_config.phy_is_low_power) | 
 | 10911 | 		tg3_set_power_state(tp, PCI_D0); | 
 | 10912 |  | 
| Michael Chan | 566f86a | 2005-05-29 14:56:58 -0700 | [diff] [blame] | 10913 | 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST); | 
 | 10914 |  | 
 | 10915 | 	if (tg3_test_nvram(tp) != 0) { | 
 | 10916 | 		etest->flags |= ETH_TEST_FL_FAILED; | 
 | 10917 | 		data[0] = 1; | 
 | 10918 | 	} | 
| Michael Chan | ca43007 | 2005-05-29 14:57:23 -0700 | [diff] [blame] | 10919 | 	if (tg3_test_link(tp) != 0) { | 
 | 10920 | 		etest->flags |= ETH_TEST_FL_FAILED; | 
 | 10921 | 		data[1] = 1; | 
 | 10922 | 	} | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10923 | 	if (etest->flags & ETH_TEST_FL_OFFLINE) { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10924 | 		int err, err2 = 0, irq_sync = 0; | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10925 |  | 
| Michael Chan | bbe832c | 2005-06-24 20:20:04 -0700 | [diff] [blame] | 10926 | 		if (netif_running(dev)) { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10927 | 			tg3_phy_stop(tp); | 
| Michael Chan | bbe832c | 2005-06-24 20:20:04 -0700 | [diff] [blame] | 10928 | 			tg3_netif_stop(tp); | 
 | 10929 | 			irq_sync = 1; | 
 | 10930 | 		} | 
 | 10931 |  | 
 | 10932 | 		tg3_full_lock(tp, irq_sync); | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10933 |  | 
 | 10934 | 		tg3_halt(tp, RESET_KIND_SUSPEND, 1); | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 10935 | 		err = tg3_nvram_lock(tp); | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10936 | 		tg3_halt_cpu(tp, RX_CPU_BASE); | 
 | 10937 | 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 10938 | 			tg3_halt_cpu(tp, TX_CPU_BASE); | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 10939 | 		if (!err) | 
 | 10940 | 			tg3_nvram_unlock(tp); | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10941 |  | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 10942 | 		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | 
 | 10943 | 			tg3_phy_reset(tp); | 
 | 10944 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10945 | 		if (tg3_test_registers(tp) != 0) { | 
 | 10946 | 			etest->flags |= ETH_TEST_FL_FAILED; | 
 | 10947 | 			data[2] = 1; | 
 | 10948 | 		} | 
| Michael Chan | 7942e1d | 2005-05-29 14:58:36 -0700 | [diff] [blame] | 10949 | 		if (tg3_test_memory(tp) != 0) { | 
 | 10950 | 			etest->flags |= ETH_TEST_FL_FAILED; | 
 | 10951 | 			data[3] = 1; | 
 | 10952 | 		} | 
| Michael Chan | 9f40dea | 2005-09-05 17:53:06 -0700 | [diff] [blame] | 10953 | 		if ((data[4] = tg3_test_loopback(tp)) != 0) | 
| Michael Chan | c76949a | 2005-05-29 14:58:59 -0700 | [diff] [blame] | 10954 | 			etest->flags |= ETH_TEST_FL_FAILED; | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10955 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 10956 | 		tg3_full_unlock(tp); | 
 | 10957 |  | 
| Michael Chan | d4bc392 | 2005-05-29 14:59:20 -0700 | [diff] [blame] | 10958 | 		if (tg3_test_interrupt(tp) != 0) { | 
 | 10959 | 			etest->flags |= ETH_TEST_FL_FAILED; | 
 | 10960 | 			data[5] = 1; | 
 | 10961 | 		} | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 10962 |  | 
 | 10963 | 		tg3_full_lock(tp, 0); | 
| Michael Chan | d4bc392 | 2005-05-29 14:59:20 -0700 | [diff] [blame] | 10964 |  | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10965 | 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
 | 10966 | 		if (netif_running(dev)) { | 
 | 10967 | 			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10968 | 			err2 = tg3_restart_hw(tp, 1); | 
 | 10969 | 			if (!err2) | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 10970 | 				tg3_netif_start(tp); | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10971 | 		} | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 10972 |  | 
 | 10973 | 		tg3_full_unlock(tp); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10974 |  | 
 | 10975 | 		if (irq_sync && !err2) | 
 | 10976 | 			tg3_phy_start(tp); | 
| Michael Chan | a71116d | 2005-05-29 14:58:11 -0700 | [diff] [blame] | 10977 | 	} | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 10978 | 	if (tp->link_config.phy_is_low_power) | 
 | 10979 | 		tg3_set_power_state(tp, PCI_D3hot); | 
 | 10980 |  | 
| Michael Chan | 4cafd3f | 2005-05-29 14:56:34 -0700 | [diff] [blame] | 10981 | } | 
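tg3_self_test() is invoked through ETHTOOL_TEST (`ethtool -t`); the result slots it fills, data[0] through data[5], correspond to nvram, link, registers, memory, loopback and interrupt. A hedged user-space sketch follows: it assumes interface "eth0", hard-codes the six slots used above (a robust tool would query the test count first), and needs CAP_NET_ADMIN.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#define TG3_TEST_SLOTS 6	/* data[0]..data[5] filled in by tg3_self_test() */

int main(void)
{
	struct ethtool_test *test;
	struct ifreq ifr;
	int fd, i;

	/* Room for the fixed header plus one u64 result per sub-test. */
	test = calloc(1, sizeof(*test) + TG3_TEST_SLOTS * sizeof(__u64));
	if (!test)
		return 1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* request the full offline set */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)test;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_TEST");
		free(test);
		return 1;
	}

	/* A nonzero slot marks the corresponding sub-test as failed. */
	for (i = 0; i < TG3_TEST_SLOTS; i++)
		printf("result[%d] = %llu\n", i,
		       (unsigned long long)test->data[i]);
	printf("overall: %s\n",
	       (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");

	close(fd);
	free(test);
	return 0;
}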
 | 10982 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10983 | static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 
 | 10984 | { | 
 | 10985 | 	struct mii_ioctl_data *data = if_mii(ifr); | 
 | 10986 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 10987 | 	int err; | 
 | 10988 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10989 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 10990 | 		struct phy_device *phydev; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10991 | 		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 
 | 10992 | 			return -EAGAIN; | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 10993 | 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
 | 10994 | 		return phy_mii_ioctl(phydev, data, cmd); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 10995 | 	} | 
 | 10996 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10997 | 	switch(cmd) { | 
 | 10998 | 	case SIOCGMIIPHY: | 
| Matt Carlson | 882e979 | 2009-09-01 13:21:36 +0000 | [diff] [blame] | 10999 | 		data->phy_id = tp->phy_addr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11000 |  | 
 | 11001 | 		/* fallthru */ | 
 | 11002 | 	case SIOCGMIIREG: { | 
 | 11003 | 		u32 mii_regval; | 
 | 11004 |  | 
 | 11005 | 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 
 | 11006 | 			break;			/* We have no PHY */ | 
 | 11007 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 11008 | 		if (tp->link_config.phy_is_low_power) | 
 | 11009 | 			return -EAGAIN; | 
 | 11010 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 11011 | 		spin_lock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11012 | 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 11013 | 		spin_unlock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11014 |  | 
 | 11015 | 		data->val_out = mii_regval; | 
 | 11016 |  | 
 | 11017 | 		return err; | 
 | 11018 | 	} | 
 | 11019 |  | 
 | 11020 | 	case SIOCSMIIREG: | 
 | 11021 | 		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 
 | 11022 | 			break;			/* We have no PHY */ | 
 | 11023 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 11024 | 		if (tp->link_config.phy_is_low_power) | 
 | 11025 | 			return -EAGAIN; | 
 | 11026 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 11027 | 		spin_lock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11028 | 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 11029 | 		spin_unlock_bh(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11030 |  | 
 | 11031 | 		return err; | 
 | 11032 |  | 
 | 11033 | 	default: | 
 | 11034 | 		/* do nothing */ | 
 | 11035 | 		break; | 
 | 11036 | 	} | 
 | 11037 | 	return -EOPNOTSUPP; | 
 | 11038 | } | 
 | 11039 |  | 
 | 11040 | #if TG3_VLAN_TAG_USED | 
 | 11041 | static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | 
 | 11042 | { | 
 | 11043 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 11044 |  | 
| Matt Carlson | 844b3ee | 2009-02-25 14:23:56 +0000 | [diff] [blame] | 11045 | 	if (!netif_running(dev)) { | 
 | 11046 | 		tp->vlgrp = grp; | 
 | 11047 | 		return; | 
 | 11048 | 	} | 
 | 11049 |  | 
 | 11050 | 	tg3_netif_stop(tp); | 
| Michael Chan | 29315e8 | 2006-06-29 20:12:30 -0700 | [diff] [blame] | 11051 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 11052 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11053 |  | 
 | 11054 | 	tp->vlgrp = grp; | 
 | 11055 |  | 
 | 11056 | 	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ | 
 | 11057 | 	__tg3_set_rx_mode(dev); | 
 | 11058 |  | 
| Matt Carlson | 844b3ee | 2009-02-25 14:23:56 +0000 | [diff] [blame] | 11059 | 	tg3_netif_start(tp); | 
| Michael Chan | 4696654 | 2007-07-11 19:47:19 -0700 | [diff] [blame] | 11060 |  | 
 | 11061 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11062 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11063 | #endif | 
 | 11064 |  | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 11065 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | 
 | 11066 | { | 
 | 11067 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 11068 |  | 
 | 11069 | 	memcpy(ec, &tp->coal, sizeof(*ec)); | 
 | 11070 | 	return 0; | 
 | 11071 | } | 
 | 11072 |  | 
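 |  | /* ethtool set_coalesce handler.  Validate the requested values against | 
 |  |  * the chip limits (5705 and newer parts do not support the *_irq and | 
 |  |  * stats-block timers, so those limits stay at zero), copy the relevant | 
 |  |  * fields into tp->coal and, if the interface is running, program the | 
 |  |  * hardware under the full lock. | 
 |  |  */ | 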
| Michael Chan | d244c89 | 2005-07-05 14:42:33 -0700 | [diff] [blame] | 11073 | static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | 
 | 11074 | { | 
 | 11075 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 11076 | 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; | 
 | 11077 | 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; | 
 | 11078 |  | 
 | 11079 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { | 
 | 11080 | 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; | 
 | 11081 | 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; | 
 | 11082 | 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS; | 
 | 11083 | 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS; | 
 | 11084 | 	} | 
 | 11085 |  | 
 | 11086 | 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || | 
 | 11087 | 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || | 
 | 11088 | 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || | 
 | 11089 | 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || | 
 | 11090 | 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || | 
 | 11091 | 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || | 
 | 11092 | 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || | 
 | 11093 | 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || | 
 | 11094 | 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || | 
 | 11095 | 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) | 
 | 11096 | 		return -EINVAL; | 
 | 11097 |  | 
 | 11098 | 	/* No rx interrupts will be generated if both are zero */ | 
 | 11099 | 	if ((ec->rx_coalesce_usecs == 0) && | 
 | 11100 | 	    (ec->rx_max_coalesced_frames == 0)) | 
 | 11101 | 		return -EINVAL; | 
 | 11102 |  | 
 | 11103 | 	/* No tx interrupts will be generated if both are zero */ | 
 | 11104 | 	if ((ec->tx_coalesce_usecs == 0) && | 
 | 11105 | 	    (ec->tx_max_coalesced_frames == 0)) | 
 | 11106 | 		return -EINVAL; | 
 | 11107 |  | 
 | 11108 | 	/* Only copy relevant parameters, ignore all others. */ | 
 | 11109 | 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; | 
 | 11110 | 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; | 
 | 11111 | 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; | 
 | 11112 | 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; | 
 | 11113 | 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; | 
 | 11114 | 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; | 
 | 11115 | 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; | 
 | 11116 | 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; | 
 | 11117 | 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; | 
 | 11118 |  | 
 | 11119 | 	if (netif_running(dev)) { | 
 | 11120 | 		tg3_full_lock(tp, 0); | 
 | 11121 | 		__tg3_set_coalesce(tp, &tp->coal); | 
 | 11122 | 		tg3_full_unlock(tp); | 
 | 11123 | 	} | 
 | 11124 | 	return 0; | 
 | 11125 | } | 
 | 11126 |  | 
| Jeff Garzik | 7282d49 | 2006-09-13 14:30:00 -0400 | [diff] [blame] | 11127 | static const struct ethtool_ops tg3_ethtool_ops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11128 | 	.get_settings		= tg3_get_settings, | 
 | 11129 | 	.set_settings		= tg3_set_settings, | 
 | 11130 | 	.get_drvinfo		= tg3_get_drvinfo, | 
 | 11131 | 	.get_regs_len		= tg3_get_regs_len, | 
 | 11132 | 	.get_regs		= tg3_get_regs, | 
 | 11133 | 	.get_wol		= tg3_get_wol, | 
 | 11134 | 	.set_wol		= tg3_set_wol, | 
 | 11135 | 	.get_msglevel		= tg3_get_msglevel, | 
 | 11136 | 	.set_msglevel		= tg3_set_msglevel, | 
 | 11137 | 	.nway_reset		= tg3_nway_reset, | 
 | 11138 | 	.get_link		= ethtool_op_get_link, | 
 | 11139 | 	.get_eeprom_len		= tg3_get_eeprom_len, | 
 | 11140 | 	.get_eeprom		= tg3_get_eeprom, | 
 | 11141 | 	.set_eeprom		= tg3_set_eeprom, | 
 | 11142 | 	.get_ringparam		= tg3_get_ringparam, | 
 | 11143 | 	.set_ringparam		= tg3_set_ringparam, | 
 | 11144 | 	.get_pauseparam		= tg3_get_pauseparam, | 
 | 11145 | 	.set_pauseparam		= tg3_set_pauseparam, | 
 | 11146 | 	.get_rx_csum		= tg3_get_rx_csum, | 
 | 11147 | 	.set_rx_csum		= tg3_set_rx_csum, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11148 | 	.set_tx_csum		= tg3_set_tx_csum, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11149 | 	.set_sg			= ethtool_op_set_sg, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11150 | 	.set_tso		= tg3_set_tso, | 
| Michael Chan | 4cafd3f | 2005-05-29 14:56:34 -0700 | [diff] [blame] | 11151 | 	.self_test		= tg3_self_test, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11152 | 	.get_strings		= tg3_get_strings, | 
| Michael Chan | 4009a93 | 2005-09-05 17:52:54 -0700 | [diff] [blame] | 11153 | 	.phys_id		= tg3_phys_id, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11154 | 	.get_ethtool_stats	= tg3_get_ethtool_stats, | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 11155 | 	.get_coalesce		= tg3_get_coalesce, | 
| Michael Chan | d244c89 | 2005-07-05 14:42:33 -0700 | [diff] [blame] | 11156 | 	.set_coalesce		= tg3_set_coalesce, | 
| Jeff Garzik | b9f2c04 | 2007-10-03 18:07:32 -0700 | [diff] [blame] | 11157 | 	.get_sset_count		= tg3_get_sset_count, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11158 | }; | 
 | 11159 |  | 
 | 11160 | static void __devinit tg3_get_eeprom_size(struct tg3 *tp) | 
 | 11161 | { | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11162 | 	u32 cursize, val, magic; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11163 |  | 
 | 11164 | 	tp->nvram_size = EEPROM_CHIP_SIZE; | 
 | 11165 |  | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 11166 | 	if (tg3_nvram_read(tp, 0, &magic) != 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11167 | 		return; | 
 | 11168 |  | 
| Michael Chan | b16250e | 2006-09-27 16:10:14 -0700 | [diff] [blame] | 11169 | 	if ((magic != TG3_EEPROM_MAGIC) && | 
 | 11170 | 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && | 
 | 11171 | 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11172 | 		return; | 
 | 11173 |  | 
 | 11174 | 	/* | 
 | 11175 | 	 * Size the chip by reading offsets at increasing powers of two. | 
 | 11176 | 	 * When we encounter our validation signature, we know the addressing | 
 | 11177 | 	 * has wrapped around, and thus have our chip size. | 
 | 11178 | 	 */ | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11179 | 	cursize = 0x10; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11180 |  | 
 | 11181 | 	while (cursize < tp->nvram_size) { | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 11182 | 		if (tg3_nvram_read(tp, cursize, &val) != 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11183 | 			return; | 
 | 11184 |  | 
| Michael Chan | 1820180 | 2006-03-20 22:29:15 -0800 | [diff] [blame] | 11185 | 		if (val == magic) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11186 | 			break; | 
 | 11187 |  | 
 | 11188 | 		cursize <<= 1; | 
 | 11189 | 	} | 
 | 11190 |  | 
 | 11191 | 	tp->nvram_size = cursize; | 
 | 11192 | } | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 11193 |  | 
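 |  | /* Determine the total NVRAM size.  Images carrying the standard magic | 
 |  |  * store a 16-bit size-in-KB field at offset 0xf2; selfboot images are | 
 |  |  * probed with tg3_get_eeprom_size(); otherwise a 512KB default is used. | 
 |  |  */ | 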
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11194 | static void __devinit tg3_get_nvram_size(struct tg3 *tp) | 
 | 11195 | { | 
 | 11196 | 	u32 val; | 
 | 11197 |  | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 11198 | 	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | 
 | 11199 | 	    tg3_nvram_read(tp, 0, &val) != 0) | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11200 | 		return; | 
 | 11201 |  | 
 | 11202 | 	/* Selfboot format */ | 
| Michael Chan | 1820180 | 2006-03-20 22:29:15 -0800 | [diff] [blame] | 11203 | 	if (val != TG3_EEPROM_MAGIC) { | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11204 | 		tg3_get_eeprom_size(tp); | 
 | 11205 | 		return; | 
 | 11206 | 	} | 
 | 11207 |  | 
| Matt Carlson | 6d348f2 | 2009-02-25 14:25:52 +0000 | [diff] [blame] | 11208 | 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11209 | 		if (val != 0) { | 
| Matt Carlson | 6d348f2 | 2009-02-25 14:25:52 +0000 | [diff] [blame] | 11210 | 			/* This is confusing.  We want to operate on the | 
 | 11211 | 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read() | 
 | 11212 | 			 * call will read from NVRAM and byteswap the data | 
 | 11213 | 			 * according to the byteswapping settings for all | 
 | 11214 | 			 * other register accesses.  This ensures the data we | 
 | 11215 | 			 * want will always reside in the lower 16-bits. | 
 | 11216 | 			 * However, the data in NVRAM is in LE format, which | 
 | 11217 | 			 * means the data from the NVRAM read will always be | 
 | 11218 | 			 * opposite the endianness of the CPU.  The 16-bit | 
 | 11219 | 			 * byteswap then brings the data to CPU endianness. | 
 | 11220 | 			 */ | 
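 |  | 			/* e.g. a byteswapped value of 0x0200 (decimal 512) | 
 |  | 			 * yields 512 * 1024 bytes, i.e. a 512KB part. | 
 |  | 			 */ | 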
 | 11221 | 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11222 | 			return; | 
 | 11223 | 		} | 
 | 11224 | 	} | 
| Matt Carlson | fd1122a | 2008-05-02 16:48:36 -0700 | [diff] [blame] | 11225 | 	tp->nvram_size = TG3_NVRAM_SIZE_512KB; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11226 | } | 
 | 11227 |  | 
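 |  | /* Default NVRAM detection used when no ASIC-specific helper applies: | 
 |  |  * decode the flash vendor and page size from NVRAM_CFG1 on 5750/5780- | 
 |  |  * class parts, falling back to a buffered Atmel AT45DB0X1B layout. | 
 |  |  */ | 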
 | 11228 | static void __devinit tg3_get_nvram_info(struct tg3 *tp) | 
 | 11229 | { | 
 | 11230 | 	u32 nvcfg1; | 
 | 11231 |  | 
 | 11232 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11233 | 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { | 
 | 11234 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11235 | 	} else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11236 | 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 
 | 11237 | 		tw32(NVRAM_CFG1, nvcfg1); | 
 | 11238 | 	} | 
 | 11239 |  | 
| Michael Chan | 4c98748 | 2005-09-05 17:52:38 -0700 | [diff] [blame] | 11240 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 11241 | 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11242 | 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11243 | 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: | 
 | 11244 | 			tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11245 | 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; | 
 | 11246 | 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11247 | 			break; | 
 | 11248 | 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: | 
 | 11249 | 			tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11250 | 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; | 
 | 11251 | 			break; | 
 | 11252 | 		case FLASH_VENDOR_ATMEL_EEPROM: | 
 | 11253 | 			tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11254 | 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 
 | 11255 | 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11256 | 			break; | 
 | 11257 | 		case FLASH_VENDOR_ST: | 
 | 11258 | 			tp->nvram_jedecnum = JEDEC_ST; | 
 | 11259 | 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; | 
 | 11260 | 			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11261 | 			break; | 
 | 11262 | 		case FLASH_VENDOR_SAIFUN: | 
 | 11263 | 			tp->nvram_jedecnum = JEDEC_SAIFUN; | 
 | 11264 | 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; | 
 | 11265 | 			break; | 
 | 11266 | 		case FLASH_VENDOR_SST_SMALL: | 
 | 11267 | 		case FLASH_VENDOR_SST_LARGE: | 
 | 11268 | 			tp->nvram_jedecnum = JEDEC_SST; | 
 | 11269 | 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; | 
 | 11270 | 			break; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11271 | 		} | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11272 | 	} else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11273 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11274 | 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; | 
 | 11275 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11276 | 	} | 
 | 11277 | } | 
 | 11278 |  | 
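 |  | /* Translate the NVRAM_CFG1 5752-style page-size encoding into bytes. */ | 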
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11279 | static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) | 
 | 11280 | { | 
 | 11281 | 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { | 
 | 11282 | 	case FLASH_5752PAGE_SIZE_256: | 
 | 11283 | 		tp->nvram_pagesize = 256; | 
 | 11284 | 		break; | 
 | 11285 | 	case FLASH_5752PAGE_SIZE_512: | 
 | 11286 | 		tp->nvram_pagesize = 512; | 
 | 11287 | 		break; | 
 | 11288 | 	case FLASH_5752PAGE_SIZE_1K: | 
 | 11289 | 		tp->nvram_pagesize = 1024; | 
 | 11290 | 		break; | 
 | 11291 | 	case FLASH_5752PAGE_SIZE_2K: | 
 | 11292 | 		tp->nvram_pagesize = 2048; | 
 | 11293 | 		break; | 
 | 11294 | 	case FLASH_5752PAGE_SIZE_4K: | 
 | 11295 | 		tp->nvram_pagesize = 4096; | 
 | 11296 | 		break; | 
 | 11297 | 	case FLASH_5752PAGE_SIZE_264: | 
 | 11298 | 		tp->nvram_pagesize = 264; | 
 | 11299 | 		break; | 
 | 11300 | 	case FLASH_5752PAGE_SIZE_528: | 
 | 11301 | 		tp->nvram_pagesize = 528; | 
 | 11302 | 		break; | 
 | 11303 | 	} | 
 | 11304 | } | 
 | 11305 |  | 
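 |  | /* The tg3_get_*_nvram_info() helpers below decode NVRAM_CFG1 for a | 
 |  |  * specific ASIC family: they identify the flash/EEPROM vendor and set | 
 |  |  * the NVRAM flags, page size and, where the encoding allows, the total | 
 |  |  * size. | 
 |  |  */ | 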
| Michael Chan | 361b4ac | 2005-04-21 17:11:21 -0700 | [diff] [blame] | 11306 | static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) | 
 | 11307 | { | 
 | 11308 | 	u32 nvcfg1; | 
 | 11309 |  | 
 | 11310 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11311 |  | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11312 | 	/* NVRAM protection for TPM */ | 
 | 11313 | 	if (nvcfg1 & (1 << 27)) | 
| Matt Carlson | f66a29b | 2009-11-13 13:03:36 +0000 | [diff] [blame] | 11314 | 		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11315 |  | 
| Michael Chan | 361b4ac | 2005-04-21 17:11:21 -0700 | [diff] [blame] | 11316 | 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11317 | 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: | 
 | 11318 | 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: | 
 | 11319 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11320 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11321 | 		break; | 
 | 11322 | 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 
 | 11323 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11324 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11325 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11326 | 		break; | 
 | 11327 | 	case FLASH_5752VENDOR_ST_M45PE10: | 
 | 11328 | 	case FLASH_5752VENDOR_ST_M45PE20: | 
 | 11329 | 	case FLASH_5752VENDOR_ST_M45PE40: | 
 | 11330 | 		tp->nvram_jedecnum = JEDEC_ST; | 
 | 11331 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11332 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11333 | 		break; | 
| Michael Chan | 361b4ac | 2005-04-21 17:11:21 -0700 | [diff] [blame] | 11334 | 	} | 
 | 11335 |  | 
 | 11336 | 	if (tp->tg3_flags2 & TG3_FLG2_FLASH) { | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11337 | 		tg3_nvram_get_pagesize(tp, nvcfg1); | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11338 | 	} else { | 
| Michael Chan | 361b4ac | 2005-04-21 17:11:21 -0700 | [diff] [blame] | 11339 | 		/* For eeprom, set pagesize to maximum eeprom size */ | 
 | 11340 | 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 
 | 11341 |  | 
 | 11342 | 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 
 | 11343 | 		tw32(NVRAM_CFG1, nvcfg1); | 
 | 11344 | 	} | 
 | 11345 | } | 
 | 11346 |  | 
| Michael Chan | d3c7b88 | 2006-03-23 01:28:25 -0800 | [diff] [blame] | 11347 | static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | 
 | 11348 | { | 
| Matt Carlson | 989a9d2 | 2007-05-05 11:51:05 -0700 | [diff] [blame] | 11349 | 	u32 nvcfg1, protect = 0; | 
| Michael Chan | d3c7b88 | 2006-03-23 01:28:25 -0800 | [diff] [blame] | 11350 |  | 
 | 11351 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11352 |  | 
 | 11353 | 	/* NVRAM protection for TPM */ | 
| Matt Carlson | 989a9d2 | 2007-05-05 11:51:05 -0700 | [diff] [blame] | 11354 | 	if (nvcfg1 & (1 << 27)) { | 
| Matt Carlson | f66a29b | 2009-11-13 13:03:36 +0000 | [diff] [blame] | 11355 | 		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; | 
| Matt Carlson | 989a9d2 | 2007-05-05 11:51:05 -0700 | [diff] [blame] | 11356 | 		protect = 1; | 
 | 11357 | 	} | 
| Michael Chan | d3c7b88 | 2006-03-23 01:28:25 -0800 | [diff] [blame] | 11358 |  | 
| Matt Carlson | 989a9d2 | 2007-05-05 11:51:05 -0700 | [diff] [blame] | 11359 | 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; | 
 | 11360 | 	switch (nvcfg1) { | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11361 | 	case FLASH_5755VENDOR_ATMEL_FLASH_1: | 
 | 11362 | 	case FLASH_5755VENDOR_ATMEL_FLASH_2: | 
 | 11363 | 	case FLASH_5755VENDOR_ATMEL_FLASH_3: | 
 | 11364 | 	case FLASH_5755VENDOR_ATMEL_FLASH_5: | 
 | 11365 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11366 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11367 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11368 | 		tp->nvram_pagesize = 264; | 
 | 11369 | 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || | 
 | 11370 | 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) | 
 | 11371 | 			tp->nvram_size = (protect ? 0x3e200 : | 
 | 11372 | 					  TG3_NVRAM_SIZE_512KB); | 
 | 11373 | 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) | 
 | 11374 | 			tp->nvram_size = (protect ? 0x1f200 : | 
 | 11375 | 					  TG3_NVRAM_SIZE_256KB); | 
 | 11376 | 		else | 
 | 11377 | 			tp->nvram_size = (protect ? 0x1f200 : | 
 | 11378 | 					  TG3_NVRAM_SIZE_128KB); | 
 | 11379 | 		break; | 
 | 11380 | 	case FLASH_5752VENDOR_ST_M45PE10: | 
 | 11381 | 	case FLASH_5752VENDOR_ST_M45PE20: | 
 | 11382 | 	case FLASH_5752VENDOR_ST_M45PE40: | 
 | 11383 | 		tp->nvram_jedecnum = JEDEC_ST; | 
 | 11384 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11385 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11386 | 		tp->nvram_pagesize = 256; | 
 | 11387 | 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) | 
 | 11388 | 			tp->nvram_size = (protect ? | 
 | 11389 | 					  TG3_NVRAM_SIZE_64KB : | 
 | 11390 | 					  TG3_NVRAM_SIZE_128KB); | 
 | 11391 | 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) | 
 | 11392 | 			tp->nvram_size = (protect ? | 
 | 11393 | 					  TG3_NVRAM_SIZE_64KB : | 
 | 11394 | 					  TG3_NVRAM_SIZE_256KB); | 
 | 11395 | 		else | 
 | 11396 | 			tp->nvram_size = (protect ? | 
 | 11397 | 					  TG3_NVRAM_SIZE_128KB : | 
 | 11398 | 					  TG3_NVRAM_SIZE_512KB); | 
 | 11399 | 		break; | 
| Michael Chan | d3c7b88 | 2006-03-23 01:28:25 -0800 | [diff] [blame] | 11400 | 	} | 
 | 11401 | } | 
 | 11402 |  | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11403 | static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) | 
 | 11404 | { | 
 | 11405 | 	u32 nvcfg1; | 
 | 11406 |  | 
 | 11407 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11408 |  | 
 | 11409 | 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11410 | 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: | 
 | 11411 | 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: | 
 | 11412 | 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: | 
 | 11413 | 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: | 
 | 11414 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11415 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11416 | 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11417 |  | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11418 | 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 
 | 11419 | 		tw32(NVRAM_CFG1, nvcfg1); | 
 | 11420 | 		break; | 
 | 11421 | 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 
 | 11422 | 	case FLASH_5755VENDOR_ATMEL_FLASH_1: | 
 | 11423 | 	case FLASH_5755VENDOR_ATMEL_FLASH_2: | 
 | 11424 | 	case FLASH_5755VENDOR_ATMEL_FLASH_3: | 
 | 11425 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11426 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11427 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11428 | 		tp->nvram_pagesize = 264; | 
 | 11429 | 		break; | 
 | 11430 | 	case FLASH_5752VENDOR_ST_M45PE10: | 
 | 11431 | 	case FLASH_5752VENDOR_ST_M45PE20: | 
 | 11432 | 	case FLASH_5752VENDOR_ST_M45PE40: | 
 | 11433 | 		tp->nvram_jedecnum = JEDEC_ST; | 
 | 11434 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11435 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11436 | 		tp->nvram_pagesize = 256; | 
 | 11437 | 		break; | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11438 | 	} | 
 | 11439 | } | 
 | 11440 |  | 
| Matt Carlson | 6b91fa0 | 2007-10-10 18:01:09 -0700 | [diff] [blame] | 11441 | static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | 
 | 11442 | { | 
 | 11443 | 	u32 nvcfg1, protect = 0; | 
 | 11444 |  | 
 | 11445 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11446 |  | 
 | 11447 | 	/* NVRAM protection for TPM */ | 
 | 11448 | 	if (nvcfg1 & (1 << 27)) { | 
| Matt Carlson | f66a29b | 2009-11-13 13:03:36 +0000 | [diff] [blame] | 11449 | 		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; | 
| Matt Carlson | 6b91fa0 | 2007-10-10 18:01:09 -0700 | [diff] [blame] | 11450 | 		protect = 1; | 
 | 11451 | 	} | 
 | 11452 |  | 
 | 11453 | 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; | 
 | 11454 | 	switch (nvcfg1) { | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11455 | 	case FLASH_5761VENDOR_ATMEL_ADB021D: | 
 | 11456 | 	case FLASH_5761VENDOR_ATMEL_ADB041D: | 
 | 11457 | 	case FLASH_5761VENDOR_ATMEL_ADB081D: | 
 | 11458 | 	case FLASH_5761VENDOR_ATMEL_ADB161D: | 
 | 11459 | 	case FLASH_5761VENDOR_ATMEL_MDB021D: | 
 | 11460 | 	case FLASH_5761VENDOR_ATMEL_MDB041D: | 
 | 11461 | 	case FLASH_5761VENDOR_ATMEL_MDB081D: | 
 | 11462 | 	case FLASH_5761VENDOR_ATMEL_MDB161D: | 
 | 11463 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11464 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11465 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11466 | 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; | 
 | 11467 | 		tp->nvram_pagesize = 256; | 
 | 11468 | 		break; | 
 | 11469 | 	case FLASH_5761VENDOR_ST_A_M45PE20: | 
 | 11470 | 	case FLASH_5761VENDOR_ST_A_M45PE40: | 
 | 11471 | 	case FLASH_5761VENDOR_ST_A_M45PE80: | 
 | 11472 | 	case FLASH_5761VENDOR_ST_A_M45PE16: | 
 | 11473 | 	case FLASH_5761VENDOR_ST_M_M45PE20: | 
 | 11474 | 	case FLASH_5761VENDOR_ST_M_M45PE40: | 
 | 11475 | 	case FLASH_5761VENDOR_ST_M_M45PE80: | 
 | 11476 | 	case FLASH_5761VENDOR_ST_M_M45PE16: | 
 | 11477 | 		tp->nvram_jedecnum = JEDEC_ST; | 
 | 11478 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11479 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11480 | 		tp->nvram_pagesize = 256; | 
 | 11481 | 		break; | 
| Matt Carlson | 6b91fa0 | 2007-10-10 18:01:09 -0700 | [diff] [blame] | 11482 | 	} | 
 | 11483 |  | 
 | 11484 | 	if (protect) { | 
 | 11485 | 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); | 
 | 11486 | 	} else { | 
 | 11487 | 		switch (nvcfg1) { | 
| Matt Carlson | 8590a60 | 2009-08-28 12:29:16 +0000 | [diff] [blame] | 11488 | 		case FLASH_5761VENDOR_ATMEL_ADB161D: | 
 | 11489 | 		case FLASH_5761VENDOR_ATMEL_MDB161D: | 
 | 11490 | 		case FLASH_5761VENDOR_ST_A_M45PE16: | 
 | 11491 | 		case FLASH_5761VENDOR_ST_M_M45PE16: | 
 | 11492 | 			tp->nvram_size = TG3_NVRAM_SIZE_2MB; | 
 | 11493 | 			break; | 
 | 11494 | 		case FLASH_5761VENDOR_ATMEL_ADB081D: | 
 | 11495 | 		case FLASH_5761VENDOR_ATMEL_MDB081D: | 
 | 11496 | 		case FLASH_5761VENDOR_ST_A_M45PE80: | 
 | 11497 | 		case FLASH_5761VENDOR_ST_M_M45PE80: | 
 | 11498 | 			tp->nvram_size = TG3_NVRAM_SIZE_1MB; | 
 | 11499 | 			break; | 
 | 11500 | 		case FLASH_5761VENDOR_ATMEL_ADB041D: | 
 | 11501 | 		case FLASH_5761VENDOR_ATMEL_MDB041D: | 
 | 11502 | 		case FLASH_5761VENDOR_ST_A_M45PE40: | 
 | 11503 | 		case FLASH_5761VENDOR_ST_M_M45PE40: | 
 | 11504 | 			tp->nvram_size = TG3_NVRAM_SIZE_512KB; | 
 | 11505 | 			break; | 
 | 11506 | 		case FLASH_5761VENDOR_ATMEL_ADB021D: | 
 | 11507 | 		case FLASH_5761VENDOR_ATMEL_MDB021D: | 
 | 11508 | 		case FLASH_5761VENDOR_ST_A_M45PE20: | 
 | 11509 | 		case FLASH_5761VENDOR_ST_M_M45PE20: | 
 | 11510 | 			tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 
 | 11511 | 			break; | 
| Matt Carlson | 6b91fa0 | 2007-10-10 18:01:09 -0700 | [diff] [blame] | 11512 | 		} | 
 | 11513 | 	} | 
 | 11514 | } | 
 | 11515 |  | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 11516 | static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) | 
 | 11517 | { | 
 | 11518 | 	tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11519 | 	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11520 | 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 
 | 11521 | } | 
 | 11522 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11523 | static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) | 
 | 11524 | { | 
 | 11525 | 	u32 nvcfg1; | 
 | 11526 |  | 
 | 11527 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11528 |  | 
 | 11529 | 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
 | 11530 | 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: | 
 | 11531 | 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: | 
 | 11532 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11533 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11534 | 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 
 | 11535 |  | 
 | 11536 | 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 
 | 11537 | 		tw32(NVRAM_CFG1, nvcfg1); | 
 | 11538 | 		return; | 
 | 11539 | 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 
 | 11540 | 	case FLASH_57780VENDOR_ATMEL_AT45DB011D: | 
 | 11541 | 	case FLASH_57780VENDOR_ATMEL_AT45DB011B: | 
 | 11542 | 	case FLASH_57780VENDOR_ATMEL_AT45DB021D: | 
 | 11543 | 	case FLASH_57780VENDOR_ATMEL_AT45DB021B: | 
 | 11544 | 	case FLASH_57780VENDOR_ATMEL_AT45DB041D: | 
 | 11545 | 	case FLASH_57780VENDOR_ATMEL_AT45DB041B: | 
 | 11546 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11547 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11548 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11549 |  | 
 | 11550 | 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
 | 11551 | 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | 
 | 11552 | 		case FLASH_57780VENDOR_ATMEL_AT45DB011D: | 
 | 11553 | 		case FLASH_57780VENDOR_ATMEL_AT45DB011B: | 
 | 11554 | 			tp->nvram_size = TG3_NVRAM_SIZE_128KB; | 
 | 11555 | 			break; | 
 | 11556 | 		case FLASH_57780VENDOR_ATMEL_AT45DB021D: | 
 | 11557 | 		case FLASH_57780VENDOR_ATMEL_AT45DB021B: | 
 | 11558 | 			tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 
 | 11559 | 			break; | 
 | 11560 | 		case FLASH_57780VENDOR_ATMEL_AT45DB041D: | 
 | 11561 | 		case FLASH_57780VENDOR_ATMEL_AT45DB041B: | 
 | 11562 | 			tp->nvram_size = TG3_NVRAM_SIZE_512KB; | 
 | 11563 | 			break; | 
 | 11564 | 		} | 
 | 11565 | 		break; | 
 | 11566 | 	case FLASH_5752VENDOR_ST_M45PE10: | 
 | 11567 | 	case FLASH_5752VENDOR_ST_M45PE20: | 
 | 11568 | 	case FLASH_5752VENDOR_ST_M45PE40: | 
 | 11569 | 		tp->nvram_jedecnum = JEDEC_ST; | 
 | 11570 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11571 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11572 |  | 
 | 11573 | 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
 | 11574 | 		case FLASH_5752VENDOR_ST_M45PE10: | 
 | 11575 | 			tp->nvram_size = TG3_NVRAM_SIZE_128KB; | 
 | 11576 | 			break; | 
 | 11577 | 		case FLASH_5752VENDOR_ST_M45PE20: | 
 | 11578 | 			tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 
 | 11579 | 			break; | 
 | 11580 | 		case FLASH_5752VENDOR_ST_M45PE40: | 
 | 11581 | 			tp->nvram_size = TG3_NVRAM_SIZE_512KB; | 
 | 11582 | 			break; | 
 | 11583 | 		} | 
 | 11584 | 		break; | 
 | 11585 | 	default: | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 11586 | 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11587 | 		return; | 
 | 11588 | 	} | 
 | 11589 |  | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11590 | 	tg3_nvram_get_pagesize(tp, nvcfg1); | 
 | 11591 | 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11592 | 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11593 | } | 
 | 11594 |  | 
 | 11595 |  | 
 | 11596 | static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) | 
 | 11597 | { | 
 | 11598 | 	u32 nvcfg1; | 
 | 11599 |  | 
 | 11600 | 	nvcfg1 = tr32(NVRAM_CFG1); | 
 | 11601 |  | 
 | 11602 | 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
 | 11603 | 	case FLASH_5717VENDOR_ATMEL_EEPROM: | 
 | 11604 | 	case FLASH_5717VENDOR_MICRO_EEPROM: | 
 | 11605 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11606 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11607 | 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | 
 | 11608 |  | 
 | 11609 | 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | 
 | 11610 | 		tw32(NVRAM_CFG1, nvcfg1); | 
 | 11611 | 		return; | 
 | 11612 | 	case FLASH_5717VENDOR_ATMEL_MDB011D: | 
 | 11613 | 	case FLASH_5717VENDOR_ATMEL_ADB011B: | 
 | 11614 | 	case FLASH_5717VENDOR_ATMEL_ADB011D: | 
 | 11615 | 	case FLASH_5717VENDOR_ATMEL_MDB021D: | 
 | 11616 | 	case FLASH_5717VENDOR_ATMEL_ADB021B: | 
 | 11617 | 	case FLASH_5717VENDOR_ATMEL_ADB021D: | 
 | 11618 | 	case FLASH_5717VENDOR_ATMEL_45USPT: | 
 | 11619 | 		tp->nvram_jedecnum = JEDEC_ATMEL; | 
 | 11620 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11621 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11622 |  | 
 | 11623 | 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
 | 11624 | 		case FLASH_5717VENDOR_ATMEL_MDB021D: | 
 | 11625 | 		case FLASH_5717VENDOR_ATMEL_ADB021B: | 
 | 11626 | 		case FLASH_5717VENDOR_ATMEL_ADB021D: | 
 | 11627 | 			tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 
 | 11628 | 			break; | 
 | 11629 | 		default: | 
 | 11630 | 			tp->nvram_size = TG3_NVRAM_SIZE_128KB; | 
 | 11631 | 			break; | 
 | 11632 | 		} | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11633 | 		break; | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11634 | 	case FLASH_5717VENDOR_ST_M_M25PE10: | 
 | 11635 | 	case FLASH_5717VENDOR_ST_A_M25PE10: | 
 | 11636 | 	case FLASH_5717VENDOR_ST_M_M45PE10: | 
 | 11637 | 	case FLASH_5717VENDOR_ST_A_M45PE10: | 
 | 11638 | 	case FLASH_5717VENDOR_ST_M_M25PE20: | 
 | 11639 | 	case FLASH_5717VENDOR_ST_A_M25PE20: | 
 | 11640 | 	case FLASH_5717VENDOR_ST_M_M45PE20: | 
 | 11641 | 	case FLASH_5717VENDOR_ST_A_M45PE20: | 
 | 11642 | 	case FLASH_5717VENDOR_ST_25USPT: | 
 | 11643 | 	case FLASH_5717VENDOR_ST_45USPT: | 
 | 11644 | 		tp->nvram_jedecnum = JEDEC_ST; | 
 | 11645 | 		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | 
 | 11646 | 		tp->tg3_flags2 |= TG3_FLG2_FLASH; | 
 | 11647 |  | 
 | 11648 | 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 
 | 11649 | 		case FLASH_5717VENDOR_ST_M_M25PE20: | 
 | 11650 | 		case FLASH_5717VENDOR_ST_A_M25PE20: | 
 | 11651 | 		case FLASH_5717VENDOR_ST_M_M45PE20: | 
 | 11652 | 		case FLASH_5717VENDOR_ST_A_M45PE20: | 
 | 11653 | 			tp->nvram_size = TG3_NVRAM_SIZE_256KB; | 
 | 11654 | 			break; | 
 | 11655 | 		default: | 
 | 11656 | 			tp->nvram_size = TG3_NVRAM_SIZE_128KB; | 
 | 11657 | 			break; | 
 | 11658 | 		} | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11659 | 		break; | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11660 | 	default: | 
 | 11661 | 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM; | 
 | 11662 | 		return; | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11663 | 	} | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11664 |  | 
 | 11665 | 	tg3_nvram_get_pagesize(tp, nvcfg1); | 
 | 11666 | 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) | 
 | 11667 | 		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11668 | } | 
 | 11669 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11670 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ | 
 | 11671 | static void __devinit tg3_nvram_init(struct tg3 *tp) | 
 | 11672 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11673 | 	tw32_f(GRC_EEPROM_ADDR, | 
 | 11674 | 	     (EEPROM_ADDR_FSM_RESET | | 
 | 11675 | 	      (EEPROM_DEFAULT_CLOCK_PERIOD << | 
 | 11676 | 	       EEPROM_ADDR_CLKPERD_SHIFT))); | 
 | 11677 |  | 
| Michael Chan | 9d57f01 | 2006-12-07 00:23:25 -0800 | [diff] [blame] | 11678 | 	msleep(1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11679 |  | 
 | 11680 | 	/* Enable seeprom accesses. */ | 
 | 11681 | 	tw32_f(GRC_LOCAL_CTRL, | 
 | 11682 | 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); | 
 | 11683 | 	udelay(100); | 
 | 11684 |  | 
 | 11685 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 
 | 11686 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { | 
 | 11687 | 		tp->tg3_flags |= TG3_FLAG_NVRAM; | 
 | 11688 |  | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 11689 | 		if (tg3_nvram_lock(tp)) { | 
 | 11690 | 			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, " | 
 | 11691 | 			       "tg3_nvram_init failed.\n", tp->dev->name); | 
 | 11692 | 			return; | 
 | 11693 | 		} | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11694 | 		tg3_enable_nvram_access(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11695 |  | 
| Matt Carlson | 989a9d2 | 2007-05-05 11:51:05 -0700 | [diff] [blame] | 11696 | 		tp->nvram_size = 0; | 
 | 11697 |  | 
| Michael Chan | 361b4ac | 2005-04-21 17:11:21 -0700 | [diff] [blame] | 11698 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 
 | 11699 | 			tg3_get_5752_nvram_info(tp); | 
| Michael Chan | d3c7b88 | 2006-03-23 01:28:25 -0800 | [diff] [blame] | 11700 | 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 
 | 11701 | 			tg3_get_5755_nvram_info(tp); | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 11702 | 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 11703 | 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 
 | 11704 | 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 11705 | 			tg3_get_5787_nvram_info(tp); | 
| Matt Carlson | 6b91fa0 | 2007-10-10 18:01:09 -0700 | [diff] [blame] | 11706 | 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) | 
 | 11707 | 			tg3_get_5761_nvram_info(tp); | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 11708 | 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
 | 11709 | 			tg3_get_5906_nvram_info(tp); | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 11710 | 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 
 | 11711 | 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11712 | 			tg3_get_57780_nvram_info(tp); | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 11713 | 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 
 | 11714 | 			tg3_get_5717_nvram_info(tp); | 
| Michael Chan | 361b4ac | 2005-04-21 17:11:21 -0700 | [diff] [blame] | 11715 | 		else | 
 | 11716 | 			tg3_get_nvram_info(tp); | 
 | 11717 |  | 
| Matt Carlson | 989a9d2 | 2007-05-05 11:51:05 -0700 | [diff] [blame] | 11718 | 		if (tp->nvram_size == 0) | 
 | 11719 | 			tg3_get_nvram_size(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11720 |  | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11721 | 		tg3_disable_nvram_access(tp); | 
| Michael Chan | 381291b | 2005-12-13 21:08:21 -0800 | [diff] [blame] | 11722 | 		tg3_nvram_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11723 |  | 
 | 11724 | 	} else { | 
 | 11725 | 		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED); | 
 | 11726 |  | 
 | 11727 | 		tg3_get_eeprom_size(tp); | 
 | 11728 | 	} | 
 | 11729 | } | 
 | 11730 |  | 
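 |  | /* Write a block to a SEEPROM part one 32-bit word at a time via the | 
 |  |  * GRC_EEPROM_ADDR/GRC_EEPROM_DATA registers, polling each word for | 
 |  |  * EEPROM_ADDR_COMPLETE. | 
 |  |  */ | 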
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11731 | static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, | 
 | 11732 | 				    u32 offset, u32 len, u8 *buf) | 
 | 11733 | { | 
 | 11734 | 	int i, j, rc = 0; | 
 | 11735 | 	u32 val; | 
 | 11736 |  | 
 | 11737 | 	for (i = 0; i < len; i += 4) { | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 11738 | 		u32 addr; | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 11739 | 		__be32 data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11740 |  | 
 | 11741 | 		addr = offset + i; | 
 | 11742 |  | 
 | 11743 | 		memcpy(&data, buf + i, 4); | 
 | 11744 |  | 
| Matt Carlson | 62cedd1 | 2009-04-20 14:52:29 -0700 | [diff] [blame] | 11745 | 		/* | 
 | 11746 | 		 * The SEEPROM interface expects the data to always be opposite | 
 | 11747 | 		 * the native endian format.  We accomplish this by reversing | 
 | 11748 | 		 * all the operations that would have been performed on the | 
 | 11749 | 		 * data from a call to tg3_nvram_read_be32(). | 
 | 11750 | 		 */ | 
 | 11751 | 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11752 |  | 
 | 11753 | 		val = tr32(GRC_EEPROM_ADDR); | 
 | 11754 | 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); | 
 | 11755 |  | 
 | 11756 | 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | | 
 | 11757 | 			EEPROM_ADDR_READ); | 
 | 11758 | 		tw32(GRC_EEPROM_ADDR, val | | 
 | 11759 | 			(0 << EEPROM_ADDR_DEVID_SHIFT) | | 
 | 11760 | 			(addr & EEPROM_ADDR_ADDR_MASK) | | 
 | 11761 | 			EEPROM_ADDR_START | | 
 | 11762 | 			EEPROM_ADDR_WRITE); | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 11763 |  | 
| Michael Chan | 9d57f01 | 2006-12-07 00:23:25 -0800 | [diff] [blame] | 11764 | 		for (j = 0; j < 1000; j++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11765 | 			val = tr32(GRC_EEPROM_ADDR); | 
 | 11766 |  | 
 | 11767 | 			if (val & EEPROM_ADDR_COMPLETE) | 
 | 11768 | 				break; | 
| Michael Chan | 9d57f01 | 2006-12-07 00:23:25 -0800 | [diff] [blame] | 11769 | 			msleep(1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11770 | 		} | 
 | 11771 | 		if (!(val & EEPROM_ADDR_COMPLETE)) { | 
 | 11772 | 			rc = -EBUSY; | 
 | 11773 | 			break; | 
 | 11774 | 		} | 
 | 11775 | 	} | 
 | 11776 |  | 
 | 11777 | 	return rc; | 
 | 11778 | } | 
 | 11779 |  | 
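 |  | /* Unbuffered flash write: read each affected page into a scratch | 
 |  |  * buffer, merge in the caller's data, erase the page and program it | 
 |  |  * back one word at a time. | 
 |  |  */ | 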
 | 11780 | /* offset and length are dword aligned */ | 
 | 11781 | static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, | 
 | 11782 | 		u8 *buf) | 
 | 11783 | { | 
 | 11784 | 	int ret = 0; | 
 | 11785 | 	u32 pagesize = tp->nvram_pagesize; | 
 | 11786 | 	u32 pagemask = pagesize - 1; | 
 | 11787 | 	u32 nvram_cmd; | 
 | 11788 | 	u8 *tmp; | 
 | 11789 |  | 
 | 11790 | 	tmp = kmalloc(pagesize, GFP_KERNEL); | 
 | 11791 | 	if (tmp == NULL) | 
 | 11792 | 		return -ENOMEM; | 
 | 11793 |  | 
 | 11794 | 	while (len) { | 
 | 11795 | 		int j; | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11796 | 		u32 phy_addr, page_off, size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11797 |  | 
 | 11798 | 		phy_addr = offset & ~pagemask; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 11799 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11800 | 		for (j = 0; j < pagesize; j += 4) { | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 11801 | 			ret = tg3_nvram_read_be32(tp, phy_addr + j, | 
 | 11802 | 						  (__be32 *) (tmp + j)); | 
 | 11803 | 			if (ret) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11804 | 				break; | 
 | 11805 | 		} | 
 | 11806 | 		if (ret) | 
 | 11807 | 			break; | 
 | 11808 |  | 
 | 11809 | 		page_off = offset & pagemask; | 
 | 11810 | 		size = pagesize; | 
 | 11811 | 		if (len < size) | 
 | 11812 | 			size = len; | 
 | 11813 |  | 
 | 11814 | 		len -= size; | 
 | 11815 |  | 
 | 11816 | 		memcpy(tmp + page_off, buf, size); | 
 | 11817 |  | 
 | 11818 | 		offset = offset + (pagesize - page_off); | 
 | 11819 |  | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11820 | 		tg3_enable_nvram_access(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11821 |  | 
 | 11822 | 		/* | 
 | 11823 | 		 * Before we can erase the flash page, we need | 
 | 11824 | 		 * to issue a special "write enable" command. | 
 | 11825 | 		 */ | 
 | 11826 | 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; | 
 | 11827 |  | 
 | 11828 | 		if (tg3_nvram_exec_cmd(tp, nvram_cmd)) | 
 | 11829 | 			break; | 
 | 11830 |  | 
 | 11831 | 		/* Erase the target page */ | 
 | 11832 | 		tw32(NVRAM_ADDR, phy_addr); | 
 | 11833 |  | 
 | 11834 | 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | | 
 | 11835 | 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; | 
 | 11836 |  | 
 | 11837 | 		if (tg3_nvram_exec_cmd(tp, nvram_cmd)) | 
 | 11838 | 			break; | 
 | 11839 |  | 
 | 11840 | 		/* Issue another write enable to start the write. */ | 
 | 11841 | 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; | 
 | 11842 |  | 
 | 11843 | 		if (tg3_nvram_exec_cmd(tp, nvram_cmd)) | 
 | 11844 | 			break; | 
 | 11845 |  | 
 | 11846 | 		for (j = 0; j < pagesize; j += 4) { | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 11847 | 			__be32 data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11848 |  | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 11849 | 			data = *((__be32 *) (tmp + j)); | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 11850 |  | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 11851 | 			tw32(NVRAM_WRDATA, be32_to_cpu(data)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11852 |  | 
 | 11853 | 			tw32(NVRAM_ADDR, phy_addr + j); | 
 | 11854 |  | 
 | 11855 | 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | | 
 | 11856 | 				NVRAM_CMD_WR; | 
 | 11857 |  | 
 | 11858 | 			if (j == 0) | 
 | 11859 | 				nvram_cmd |= NVRAM_CMD_FIRST; | 
 | 11860 | 			else if (j == (pagesize - 4)) | 
 | 11861 | 				nvram_cmd |= NVRAM_CMD_LAST; | 
 | 11862 |  | 
 | 11863 | 			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) | 
 | 11864 | 				break; | 
 | 11865 | 		} | 
 | 11866 | 		if (ret) | 
 | 11867 | 			break; | 
 | 11868 | 	} | 
 | 11869 |  | 
 | 11870 | 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; | 
 | 11871 | 	tg3_nvram_exec_cmd(tp, nvram_cmd); | 
 | 11872 |  | 
 | 11873 | 	kfree(tmp); | 
 | 11874 |  | 
 | 11875 | 	return ret; | 
 | 11876 | } | 
 | 11877 |  | 
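 |  | /* Buffered flash (and EEPROM) write: program the data word by word, | 
 |  |  * adding NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing at page and transfer | 
 |  |  * boundaries. | 
 |  |  */ | 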
 | 11878 | /* offset and length are dword aligned */ | 
 | 11879 | static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | 
 | 11880 | 		u8 *buf) | 
 | 11881 | { | 
 | 11882 | 	int i, ret = 0; | 
 | 11883 |  | 
 | 11884 | 	for (i = 0; i < len; i += 4, offset += 4) { | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 11885 | 		u32 page_off, phy_addr, nvram_cmd; | 
 | 11886 | 		__be32 data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11887 |  | 
 | 11888 | 		memcpy(&data, buf + i, 4); | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 11889 | 		tw32(NVRAM_WRDATA, be32_to_cpu(data)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11890 |  | 
 | 11891 | 		page_off = offset % tp->nvram_pagesize; | 
 | 11892 |  | 
| Michael Chan | 1820180 | 2006-03-20 22:29:15 -0800 | [diff] [blame] | 11893 | 		phy_addr = tg3_nvram_phys_addr(tp, offset); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11894 |  | 
 | 11895 | 		tw32(NVRAM_ADDR, phy_addr); | 
 | 11896 |  | 
 | 11897 | 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; | 
 | 11898 |  | 
 | 11899 | 		if ((page_off == 0) || (i == 0)) | 
 | 11900 | 			nvram_cmd |= NVRAM_CMD_FIRST; | 
| Michael Chan | f6d9a25 | 2006-04-29 19:00:24 -0700 | [diff] [blame] | 11901 | 		if (page_off == (tp->nvram_pagesize - 4)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11902 | 			nvram_cmd |= NVRAM_CMD_LAST; | 
 | 11903 |  | 
 | 11904 | 		if (i == (len - 4)) | 
 | 11905 | 			nvram_cmd |= NVRAM_CMD_LAST; | 
 | 11906 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 11907 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && | 
 | 11908 | 		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && | 
| Michael Chan | 4c98748 | 2005-09-05 17:52:38 -0700 | [diff] [blame] | 11909 | 		    (tp->nvram_jedecnum == JEDEC_ST) && | 
 | 11910 | 		    (nvram_cmd & NVRAM_CMD_FIRST)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11911 |  | 
 | 11912 | 			if ((ret = tg3_nvram_exec_cmd(tp, | 
 | 11913 | 				NVRAM_CMD_WREN | NVRAM_CMD_GO | | 
 | 11914 | 				NVRAM_CMD_DONE))) | 
 | 11915 |  | 
 | 11916 | 				break; | 
 | 11917 | 		} | 
 | 11918 | 		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { | 
 | 11919 | 			/* We always do complete word writes to eeprom. */ | 
 | 11920 | 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); | 
 | 11921 | 		} | 
 | 11922 |  | 
 | 11923 | 		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) | 
 | 11924 | 			break; | 
 | 11925 | 	} | 
 | 11926 | 	return ret; | 
 | 11927 | } | 
 | 11928 |  | 
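 |  | /* Top-level NVRAM write entry point: temporarily lift the EEPROM | 
 |  |  * write-protect GPIO if needed, then dispatch to the SEEPROM, buffered | 
 |  |  * or unbuffered write routine based on the NVRAM flags. | 
 |  |  */ | 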
 | 11929 | /* offset and length are dword aligned */ | 
 | 11930 | static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | 
 | 11931 | { | 
 | 11932 | 	int ret; | 
 | 11933 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11934 | 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 11935 | 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & | 
 | 11936 | 		       ~GRC_LCLCTRL_GPIO_OUTPUT1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11937 | 		udelay(40); | 
 | 11938 | 	} | 
 | 11939 |  | 
 | 11940 | 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { | 
 | 11941 | 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); | 
 | 11942 | 	} | 
 | 11943 | 	else { | 
 | 11944 | 		u32 grc_mode; | 
 | 11945 |  | 
| Michael Chan | ec41c7d | 2006-01-17 02:40:55 -0800 | [diff] [blame] | 11946 | 		ret = tg3_nvram_lock(tp); | 
 | 11947 | 		if (ret) | 
 | 11948 | 			return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11949 |  | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11950 | 		tg3_enable_nvram_access(tp); | 
 | 11951 | 		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 
| Matt Carlson | f66a29b | 2009-11-13 13:03:36 +0000 | [diff] [blame] | 11952 | 		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11953 | 			tw32(NVRAM_WRITE1, 0x406); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11954 |  | 
 | 11955 | 		grc_mode = tr32(GRC_MODE); | 
 | 11956 | 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); | 
 | 11957 |  | 
 | 11958 | 		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) || | 
 | 11959 | 			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) { | 
 | 11960 |  | 
 | 11961 | 			ret = tg3_nvram_write_block_buffered(tp, offset, len, | 
 | 11962 | 				buf); | 
 | 11963 | 		} | 
 | 11964 | 		else { | 
 | 11965 | 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len, | 
 | 11966 | 				buf); | 
 | 11967 | 		} | 
 | 11968 |  | 
 | 11969 | 		grc_mode = tr32(GRC_MODE); | 
 | 11970 | 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); | 
 | 11971 |  | 
| Michael Chan | e6af301 | 2005-04-21 17:12:05 -0700 | [diff] [blame] | 11972 | 		tg3_disable_nvram_access(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11973 | 		tg3_nvram_unlock(tp); | 
 | 11974 | 	} | 
 | 11975 |  | 
 | 11976 | 	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) { | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 11977 | 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11978 | 		udelay(40); | 
 | 11979 | 	} | 
 | 11980 |  | 
 | 11981 | 	return ret; | 
 | 11982 | } | 
 | 11983 |  | 
 | 11984 | struct subsys_tbl_ent { | 
 | 11985 | 	u16 subsys_vendor, subsys_devid; | 
 | 11986 | 	u32 phy_id; | 
 | 11987 | }; | 
 | 11988 |  | 
 | 11989 | static struct subsys_tbl_ent subsys_id_to_phy_id[] = { | 
 | 11990 | 	/* Broadcom boards. */ | 
 | 11991 | 	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */ | 
 | 11992 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */ | 
 | 11993 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */ | 
 | 11994 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */ | 
 | 11995 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */ | 
 | 11996 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */ | 
 | 11997 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */ | 
 | 11998 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */ | 
 | 11999 | 	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */ | 
 | 12000 | 	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */ | 
 | 12001 | 	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */ | 
 | 12002 |  | 
 | 12003 | 	/* 3com boards. */ | 
 | 12004 | 	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */ | 
 | 12005 | 	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */ | 
 | 12006 | 	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */ | 
 | 12007 | 	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */ | 
 | 12008 | 	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */ | 
 | 12009 |  | 
 | 12010 | 	/* DELL boards. */ | 
 | 12011 | 	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */ | 
 | 12012 | 	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */ | 
 | 12013 | 	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */ | 
 | 12014 | 	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */ | 
 | 12015 |  | 
 | 12016 | 	/* Compaq boards. */ | 
 | 12017 | 	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */ | 
 | 12018 | 	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */ | 
 | 12019 | 	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */ | 
 | 12020 | 	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */ | 
 | 12021 | 	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */ | 
 | 12022 |  | 
 | 12023 | 	/* IBM boards. */ | 
 | 12024 | 	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */ | 
 | 12025 | }; | 
 | 12026 |  | 
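 |  | /* Look up the board by PCI subsystem vendor and device ID in the table | 
 |  |  * above; returns NULL if there is no match. | 
 |  |  */ | 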
 | 12027 | static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp) | 
 | 12028 | { | 
 | 12029 | 	int i; | 
 | 12030 |  | 
 | 12031 | 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { | 
 | 12032 | 		if ((subsys_id_to_phy_id[i].subsys_vendor == | 
 | 12033 | 		     tp->pdev->subsystem_vendor) && | 
 | 12034 | 		    (subsys_id_to_phy_id[i].subsys_devid == | 
 | 12035 | 		     tp->pdev->subsystem_device)) | 
 | 12036 | 			return &subsys_id_to_phy_id[i]; | 
 | 12037 | 	} | 
 | 12038 | 	return NULL; | 
 | 12039 | } | 
 | 12040 |  | 
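 |  | /* Read the hardware configuration words stored in NIC SRAM | 
 |  |  * (NIC_SRAM_DATA_*) to determine the PHY ID, LED mode, WOL capability | 
 |  |  * and related flags.  The device is first brought to D0 and the memory | 
 |  |  * arbiter enabled so that the SRAM accesses succeed. | 
 |  |  */ | 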
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12041 | static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12042 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12043 | 	u32 val; | 
| Michael Chan | caf636c7 | 2006-03-22 01:05:31 -0800 | [diff] [blame] | 12044 | 	u16 pmcsr; | 
 | 12045 |  | 
 | 12046 | 	/* On some early chips the SRAM cannot be accessed in D3hot state, | 
 | 12047 | 	 * so we need to make sure we're in D0. | 
 | 12048 | 	 */ | 
 | 12049 | 	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr); | 
 | 12050 | 	pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 
 | 12051 | 	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr); | 
 | 12052 | 	msleep(1); | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12053 |  | 
 | 12054 | 	/* Make sure register accesses (indirect or otherwise) | 
 | 12055 | 	 * will function correctly. | 
 | 12056 | 	 */ | 
 | 12057 | 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 
 | 12058 | 			       tp->misc_host_ctrl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12059 |  | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 12060 | 	/* The memory arbiter has to be enabled in order for SRAM accesses | 
 | 12061 | 	 * to succeed.  Normally on powerup the tg3 chip firmware will make | 
 | 12062 | 	 * sure it is enabled, but other entities such as system netboot | 
 | 12063 | 	 * code might disable it. | 
 | 12064 | 	 */ | 
 | 12065 | 	val = tr32(MEMARB_MODE); | 
 | 12066 | 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 
 | 12067 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12068 | 	tp->phy_id = PHY_ID_INVALID; | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12069 | 	tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 
 | 12070 |  | 
| Gary Zambrano | a85feb8 | 2007-05-05 11:52:19 -0700 | [diff] [blame] | 12071 | 		/* Assume an onboard device and WOL capability by default.  */ | 
 | 12072 | 	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP; | 
| David S. Miller | 72b845e | 2006-03-14 14:11:48 -0800 | [diff] [blame] | 12073 |  | 
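 |  | 	/* 5906 parts expose WOL and ASPM configuration through the VCPU | 
 |  | 	 * shadow register (VCPU_CFGSHDW) and skip the NIC SRAM parsing | 
 |  | 	 * done below for other chips. | 
 |  | 	 */ | 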
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 12074 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 12075 | 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 12076 | 			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 12077 | 			tp->tg3_flags2 |= TG3_FLG2_IS_NIC; | 
 | 12078 | 		} | 
| Matt Carlson | 0527ba3 | 2007-10-10 18:03:30 -0700 | [diff] [blame] | 12079 | 		val = tr32(VCPU_CFGSHDW); | 
 | 12080 | 		if (val & VCPU_CFGSHDW_ASPM_DBNC) | 
| Matt Carlson | 8ed5d97 | 2007-05-07 00:25:49 -0700 | [diff] [blame] | 12081 | 			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 
| Matt Carlson | 0527ba3 | 2007-10-10 18:03:30 -0700 | [diff] [blame] | 12082 | 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) && | 
| Matt Carlson | 2023276 | 2008-12-21 20:18:56 -0800 | [diff] [blame] | 12083 | 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) | 
| Matt Carlson | 0527ba3 | 2007-10-10 18:03:30 -0700 | [diff] [blame] | 12084 | 			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 12085 | 		goto done; | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 12086 | 	} | 
 | 12087 |  | 
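 |  | 	/* For all other chips the configuration lives in NIC SRAM; only | 
 |  | 	 * trust it if the signature word matches NIC_SRAM_DATA_SIG_MAGIC. | 
 |  | 	 */ | 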
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12088 | 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); | 
 | 12089 | 	if (val == NIC_SRAM_DATA_SIG_MAGIC) { | 
 | 12090 | 		u32 nic_cfg, led_cfg; | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 12091 | 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12092 | 		int eeprom_phy_serdes = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12093 |  | 
 | 12094 | 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); | 
 | 12095 | 		tp->nic_sram_data_cfg = nic_cfg; | 
 | 12096 |  | 
 | 12097 | 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); | 
 | 12098 | 		ver >>= NIC_SRAM_DATA_VER_SHIFT; | 
 | 12099 | 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) && | 
 | 12100 | 		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) && | 
 | 12101 | 		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) && | 
 | 12102 | 		    (ver > 0) && (ver < 0x100)) | 
 | 12103 | 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); | 
 | 12104 |  | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 12105 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 
 | 12106 | 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); | 
 | 12107 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12108 | 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == | 
 | 12109 | 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) | 
 | 12110 | 			eeprom_phy_serdes = 1; | 
 | 12111 |  | 
 | 12112 | 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); | 
 | 12113 | 		if (nic_phy_id != 0) { | 
 | 12114 | 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; | 
 | 12115 | 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; | 
 | 12116 |  | 
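 |  | 			/* Rebuild the driver's internal PHY ID from the | 
 |  | 			 * NVRAM PHY ID words, using the same layout that | 
 |  | 			 * tg3_phy_probe() derives from MII_PHYSID1/2. | 
 |  | 			 */ | 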
 | 12117 | 			eeprom_phy_id  = (id1 >> 16) << 10; | 
 | 12118 | 			eeprom_phy_id |= (id2 & 0xfc00) << 16; | 
 | 12119 | 			eeprom_phy_id |= (id2 & 0x03ff) <<  0; | 
 | 12120 | 		} else | 
 | 12121 | 			eeprom_phy_id = 0; | 
 | 12122 |  | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12123 | 		tp->phy_id = eeprom_phy_id; | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 12124 | 		if (eeprom_phy_serdes) { | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 12125 | 			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 12126 | 				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; | 
 | 12127 | 			else | 
 | 12128 | 				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 
 | 12129 | 		} | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12130 |  | 
| John W. Linville | cbf4685 | 2005-04-21 17:01:29 -0700 | [diff] [blame] | 12131 | 		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12132 | 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | | 
 | 12133 | 				    SHASTA_EXT_LED_MODE_MASK); | 
| John W. Linville | cbf4685 | 2005-04-21 17:01:29 -0700 | [diff] [blame] | 12134 | 		else | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12135 | 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; | 
 | 12136 |  | 
 | 12137 | 		switch (led_cfg) { | 
 | 12138 | 		default: | 
 | 12139 | 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: | 
 | 12140 | 			tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 
 | 12141 | 			break; | 
 | 12142 |  | 
 | 12143 | 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: | 
 | 12144 | 			tp->led_ctrl = LED_CTRL_MODE_PHY_2; | 
 | 12145 | 			break; | 
 | 12146 |  | 
 | 12147 | 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC: | 
 | 12148 | 			tp->led_ctrl = LED_CTRL_MODE_MAC; | 
| Michael Chan | 9ba2779 | 2005-06-06 15:16:20 -0700 | [diff] [blame] | 12149 |  | 
 | 12150 | 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is | 
 | 12151 | 			 * read with some older 5700/5701 bootcode. | 
 | 12152 | 			 */ | 
 | 12153 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == | 
 | 12154 | 			    ASIC_REV_5700 || | 
 | 12155 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == | 
 | 12156 | 			    ASIC_REV_5701) | 
 | 12157 | 				tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 
 | 12158 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12159 | 			break; | 
 | 12160 |  | 
 | 12161 | 		case SHASTA_EXT_LED_SHARED: | 
 | 12162 | 			tp->led_ctrl = LED_CTRL_MODE_SHARED; | 
 | 12163 | 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && | 
 | 12164 | 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) | 
 | 12165 | 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | | 
 | 12166 | 						 LED_CTRL_MODE_PHY_2); | 
 | 12167 | 			break; | 
 | 12168 |  | 
 | 12169 | 		case SHASTA_EXT_LED_MAC: | 
 | 12170 | 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; | 
 | 12171 | 			break; | 
 | 12172 |  | 
 | 12173 | 		case SHASTA_EXT_LED_COMBO: | 
 | 12174 | 			tp->led_ctrl = LED_CTRL_MODE_COMBO; | 
 | 12175 | 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) | 
 | 12176 | 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | | 
 | 12177 | 						 LED_CTRL_MODE_PHY_2); | 
 | 12178 | 			break; | 
 | 12179 |  | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 12180 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12181 |  | 
 | 12182 | 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 12183 | 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && | 
 | 12184 | 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) | 
 | 12185 | 			tp->led_ctrl = LED_CTRL_MODE_PHY_2; | 
 | 12186 |  | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 12187 | 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) | 
 | 12188 | 			tp->led_ctrl = LED_CTRL_MODE_PHY_1; | 
| Matt Carlson | 5f60891 | 2007-11-12 21:17:07 -0800 | [diff] [blame] | 12189 |  | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 12190 | 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12191 | 			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT; | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 12192 | 			if ((tp->pdev->subsystem_vendor == | 
 | 12193 | 			     PCI_VENDOR_ID_ARIMA) && | 
 | 12194 | 			    (tp->pdev->subsystem_device == 0x205a || | 
 | 12195 | 			     tp->pdev->subsystem_device == 0x2063)) | 
 | 12196 | 				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 
 | 12197 | 		} else { | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 12198 | 			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 12199 | 			tp->tg3_flags2 |= TG3_FLG2_IS_NIC; | 
 | 12200 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12201 |  | 
 | 12202 | 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { | 
 | 12203 | 			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF; | 
| John W. Linville | cbf4685 | 2005-04-21 17:01:29 -0700 | [diff] [blame] | 12204 | 			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12205 | 				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; | 
 | 12206 | 		} | 
| Matt Carlson | b2b98d4 | 2008-11-03 16:52:32 -0800 | [diff] [blame] | 12207 |  | 
 | 12208 | 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && | 
 | 12209 | 			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 12210 | 			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; | 
| Matt Carlson | b2b98d4 | 2008-11-03 16:52:32 -0800 | [diff] [blame] | 12211 |  | 
| Gary Zambrano | a85feb8 | 2007-05-05 11:52:19 -0700 | [diff] [blame] | 12212 | 		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && | 
 | 12213 | 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) | 
 | 12214 | 			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12215 |  | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 12216 | 		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 12217 | 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) | 
| Matt Carlson | 0527ba3 | 2007-10-10 18:03:30 -0700 | [diff] [blame] | 12218 | 			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; | 
 | 12219 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12220 | 		if (cfg2 & (1 << 17)) | 
 | 12221 | 			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; | 
 | 12222 |  | 
 | 12223 | 		/* SerDes signal pre-emphasis in register 0x590 is set */ | 
 | 12224 | 		/* by the bootcode if bit 18 is set. */ | 
 | 12225 | 		if (cfg2 & (1 << 18)) | 
 | 12226 | 			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; | 
| Matt Carlson | 8ed5d97 | 2007-05-07 00:25:49 -0700 | [diff] [blame] | 12227 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 12228 | 		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 
 | 12229 | 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && | 
| Matt Carlson | 6833c04 | 2008-11-21 17:18:59 -0800 | [diff] [blame] | 12230 | 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) | 
 | 12231 | 			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD; | 
 | 12232 |  | 
| Matt Carlson | 8ed5d97 | 2007-05-07 00:25:49 -0700 | [diff] [blame] | 12233 | 		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 
 | 12234 | 			u32 cfg3; | 
 | 12235 |  | 
 | 12236 | 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); | 
 | 12237 | 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) | 
 | 12238 | 				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; | 
 | 12239 | 		} | 
| Matt Carlson | a9daf36 | 2008-05-25 23:49:44 -0700 | [diff] [blame] | 12240 |  | 
 | 12241 | 		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE) | 
 | 12242 | 			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE; | 
 | 12243 | 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) | 
 | 12244 | 			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN; | 
 | 12245 | 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) | 
 | 12246 | 			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12247 | 	} | 
| Matt Carlson | 05ac4cb | 2008-11-03 16:53:46 -0800 | [diff] [blame] | 12248 | done: | 
 | 12249 | 	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); | 
 | 12250 | 	device_set_wakeup_enable(&tp->pdev->dev, | 
 | 12251 | 				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12252 | } | 
 | 12253 |  | 
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 12254 | static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) | 
 | 12255 | { | 
 | 12256 | 	int i; | 
 | 12257 | 	u32 val; | 
 | 12258 |  | 
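 |  | 	/* Write the command with the START bit set, then rewrite it with | 
 |  | 	 * START cleared; completion is polled via OTP_STATUS below. | 
 |  | 	 */ | 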
 | 12259 | 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); | 
 | 12260 | 	tw32(OTP_CTRL, cmd); | 
 | 12261 |  | 
 | 12262 | 	/* Wait for up to 1 ms for command to execute. */ | 
 | 12263 | 	for (i = 0; i < 100; i++) { | 
 | 12264 | 		val = tr32(OTP_STATUS); | 
 | 12265 | 		if (val & OTP_STATUS_CMD_DONE) | 
 | 12266 | 			break; | 
 | 12267 | 		udelay(10); | 
 | 12268 | 	} | 
 | 12269 |  | 
 | 12270 | 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; | 
 | 12271 | } | 
 | 12272 |  | 
 | 12273 | /* Read the gphy configuration from the OTP region of the chip.  The gphy | 
 | 12274 |  * configuration is a 32-bit value that straddles the alignment boundary. | 
 | 12275 |  * We do two 32-bit reads and then shift and merge the results. | 
 | 12276 |  */ | 
 | 12277 | static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) | 
 | 12278 | { | 
 | 12279 | 	u32 bhalf_otp, thalf_otp; | 
 | 12280 |  | 
 | 12281 | 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); | 
 | 12282 |  | 
 | 12283 | 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) | 
 | 12284 | 		return 0; | 
 | 12285 |  | 
 | 12286 | 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); | 
 | 12287 |  | 
 | 12288 | 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) | 
 | 12289 | 		return 0; | 
 | 12290 |  | 
 | 12291 | 	thalf_otp = tr32(OTP_READ_DATA); | 
 | 12292 |  | 
 | 12293 | 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); | 
 | 12294 |  | 
 | 12295 | 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) | 
 | 12296 | 		return 0; | 
 | 12297 |  | 
 | 12298 | 	bhalf_otp = tr32(OTP_READ_DATA); | 
 | 12299 |  | 
 | 12300 | 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); | 
 | 12301 | } | 
 | 12302 |  | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12303 | static int __devinit tg3_phy_probe(struct tg3 *tp) | 
 | 12304 | { | 
 | 12305 | 	u32 hw_phy_id_1, hw_phy_id_2; | 
 | 12306 | 	u32 hw_phy_id, hw_phy_id_masked; | 
 | 12307 | 	int err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12308 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 12309 | 	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) | 
 | 12310 | 		return tg3_phy_init(tp); | 
 | 12311 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12312 | 	/* Reading the PHY ID register can conflict with ASF | 
| Nick Andrew | 877d031 | 2009-01-26 11:06:57 +0100 | [diff] [blame] | 12313 | 	 * firmware access to the PHY hardware. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12314 | 	 */ | 
 | 12315 | 	err = 0; | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 12316 | 	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 
 | 12317 | 	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12318 | 		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; | 
 | 12319 | 	} else { | 
 | 12320 | 		/* Now read the physical PHY_ID from the chip and verify | 
 | 12321 | 		 * that it is sane.  If it doesn't look good, we fall back | 
 | 12322 | 		 * to either the hard-coded table based PHY_ID or, failing | 
 | 12323 | 		 * that, the value found in the eeprom area. | 
 | 12324 | 		 */ | 
 | 12325 | 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); | 
 | 12326 | 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); | 
 | 12327 |  | 
 | 12328 | 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10; | 
 | 12329 | 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; | 
 | 12330 | 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0; | 
 | 12331 |  | 
 | 12332 | 		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK; | 
 | 12333 | 	} | 
 | 12334 |  | 
 | 12335 | 	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) { | 
 | 12336 | 		tp->phy_id = hw_phy_id; | 
 | 12337 | 		if (hw_phy_id_masked == PHY_ID_BCM8002) | 
 | 12338 | 			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 
| Michael Chan | da6b2d0 | 2005-08-19 12:54:29 -0700 | [diff] [blame] | 12339 | 		else | 
 | 12340 | 			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12341 | 	} else { | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 12342 | 		if (tp->phy_id != PHY_ID_INVALID) { | 
 | 12343 | 			/* Do nothing, phy ID already set up in | 
 | 12344 | 			 * tg3_get_eeprom_hw_cfg(). | 
 | 12345 | 			 */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12346 | 		} else { | 
 | 12347 | 			struct subsys_tbl_ent *p; | 
 | 12348 |  | 
 | 12349 | 			/* No eeprom signature?  Try the hardcoded | 
 | 12350 | 			 * subsys device table. | 
 | 12351 | 			 */ | 
 | 12352 | 			p = lookup_by_subsys(tp); | 
 | 12353 | 			if (!p) | 
 | 12354 | 				return -ENODEV; | 
 | 12355 |  | 
 | 12356 | 			tp->phy_id = p->phy_id; | 
 | 12357 | 			if (!tp->phy_id || | 
 | 12358 | 			    tp->phy_id == PHY_ID_BCM8002) | 
 | 12359 | 				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; | 
 | 12360 | 		} | 
 | 12361 | 	} | 
 | 12362 |  | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 12363 | 	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 12364 | 	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12365 | 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { | 
| Michael Chan | 3600d91 | 2006-12-07 00:21:48 -0800 | [diff] [blame] | 12366 | 		u32 bmsr, adv_reg, tg3_ctrl, mask; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12367 |  | 
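 |  | 		/* BMSR latches link-down events, so read it twice; the | 
 |  | 		 * second read reflects the current link state. | 
 |  | 		 */ | 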
 | 12368 | 		tg3_readphy(tp, MII_BMSR, &bmsr); | 
 | 12369 | 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) && | 
 | 12370 | 		    (bmsr & BMSR_LSTATUS)) | 
 | 12371 | 			goto skip_phy_reset; | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 12372 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12373 | 		err = tg3_phy_reset(tp); | 
 | 12374 | 		if (err) | 
 | 12375 | 			return err; | 
 | 12376 |  | 
 | 12377 | 		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL | | 
 | 12378 | 			   ADVERTISE_100HALF | ADVERTISE_100FULL | | 
 | 12379 | 			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); | 
 | 12380 | 		tg3_ctrl = 0; | 
 | 12381 | 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) { | 
 | 12382 | 			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF | | 
 | 12383 | 				    MII_TG3_CTRL_ADV_1000_FULL); | 
 | 12384 | 			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 
 | 12385 | 			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) | 
 | 12386 | 				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER | | 
 | 12387 | 					     MII_TG3_CTRL_ENABLE_AS_MASTER); | 
 | 12388 | 		} | 
 | 12389 |  | 
| Michael Chan | 3600d91 | 2006-12-07 00:21:48 -0800 | [diff] [blame] | 12390 | 		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | 
 | 12391 | 			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | 
 | 12392 | 			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); | 
 | 12393 | 		if (!tg3_copper_is_advertising_all(tp, mask)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12394 | 			tg3_writephy(tp, MII_ADVERTISE, adv_reg); | 
 | 12395 |  | 
 | 12396 | 			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 
 | 12397 | 				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); | 
 | 12398 |  | 
 | 12399 | 			tg3_writephy(tp, MII_BMCR, | 
 | 12400 | 				     BMCR_ANENABLE | BMCR_ANRESTART); | 
 | 12401 | 		} | 
 | 12402 | 		tg3_phy_set_wirespeed(tp); | 
 | 12403 |  | 
 | 12404 | 		tg3_writephy(tp, MII_ADVERTISE, adv_reg); | 
 | 12405 | 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) | 
 | 12406 | 			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl); | 
 | 12407 | 	} | 
 | 12408 |  | 
 | 12409 | skip_phy_reset: | 
 | 12410 | 	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { | 
 | 12411 | 		err = tg3_init_5401phy_dsp(tp); | 
 | 12412 | 		if (err) | 
 | 12413 | 			return err; | 
 | 12414 | 	} | 
 | 12415 |  | 
 | 12416 | 	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) { | 
 | 12417 | 		err = tg3_init_5401phy_dsp(tp); | 
 | 12418 | 	} | 
 | 12419 |  | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 12420 | 	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12421 | 		tp->link_config.advertising = | 
 | 12422 | 			(ADVERTISED_1000baseT_Half | | 
 | 12423 | 			 ADVERTISED_1000baseT_Full | | 
 | 12424 | 			 ADVERTISED_Autoneg | | 
 | 12425 | 			 ADVERTISED_FIBRE); | 
 | 12426 | 	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) | 
 | 12427 | 		tp->link_config.advertising &= | 
 | 12428 | 			~(ADVERTISED_1000baseT_Half | | 
 | 12429 | 			  ADVERTISED_1000baseT_Full); | 
 | 12430 |  | 
 | 12431 | 	return err; | 
 | 12432 | } | 
 | 12433 |  | 
 | 12434 | static void __devinit tg3_read_partno(struct tg3 *tp) | 
 | 12435 | { | 
| Matt Carlson | 141518c | 2009-12-03 08:36:22 +0000 | [diff] [blame] | 12436 | 	unsigned char vpd_data[TG3_NVM_VPD_LEN];   /* in little-endian format */ | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12437 | 	unsigned int i; | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 12438 | 	u32 magic; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12439 |  | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 12440 | 	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || | 
 | 12441 | 	    tg3_nvram_read(tp, 0x0, &magic)) | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 12442 | 		goto out_not_found; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12443 |  | 
| Michael Chan | 1820180 | 2006-03-20 22:29:15 -0800 | [diff] [blame] | 12444 | 	if (magic == TG3_EEPROM_MAGIC) { | 
| Matt Carlson | 141518c | 2009-12-03 08:36:22 +0000 | [diff] [blame] | 12445 | 		for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 12446 | 			u32 tmp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12447 |  | 
| Matt Carlson | 6d348f2 | 2009-02-25 14:25:52 +0000 | [diff] [blame] | 12448 | 			/* The data is in little-endian format in NVRAM. | 
 | 12449 | 			 * Use the big-endian read routines to preserve | 
 | 12450 | 			 * the byte order as it exists in NVRAM. | 
 | 12451 | 			 */ | 
| Matt Carlson | 141518c | 2009-12-03 08:36:22 +0000 | [diff] [blame] | 12452 | 			if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 12453 | 				goto out_not_found; | 
 | 12454 |  | 
| Matt Carlson | 6d348f2 | 2009-02-25 14:25:52 +0000 | [diff] [blame] | 12455 | 			memcpy(&vpd_data[i], &tmp, sizeof(tmp)); | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 12456 | 		} | 
 | 12457 | 	} else { | 
| Matt Carlson | 94c982b | 2009-12-03 08:36:23 +0000 | [diff] [blame] | 12458 | 		ssize_t cnt; | 
 | 12459 | 		unsigned int pos = 0, i = 0; | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 12460 |  | 
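 |  | 		/* No usable NVRAM VPD image, so read the VPD through the | 
 |  | 		 * PCI VPD capability instead, in up to three chunks. | 
 |  | 		 */ | 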
| Matt Carlson | 94c982b | 2009-12-03 08:36:23 +0000 | [diff] [blame] | 12461 | 		for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { | 
 | 12462 | 			cnt = pci_read_vpd(tp->pdev, pos, | 
 | 12463 | 					   TG3_NVM_VPD_LEN - pos, | 
 | 12464 | 					   &vpd_data[pos]); | 
 | 12465 | 			if (cnt == -ETIMEDOUT || cnt == -EINTR) | 
 | 12466 | 				cnt = 0; | 
 | 12467 | 			else if (cnt < 0) | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 12468 | 				goto out_not_found; | 
| Michael Chan | 1b27777 | 2006-03-20 22:27:48 -0800 | [diff] [blame] | 12469 | 		} | 
| Matt Carlson | 94c982b | 2009-12-03 08:36:23 +0000 | [diff] [blame] | 12470 | 		if (pos != TG3_NVM_VPD_LEN) | 
 | 12471 | 			goto out_not_found; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12472 | 	} | 
 | 12473 |  | 
 | 12474 | 	/* Now parse and find the part number. */ | 
| Matt Carlson | 141518c | 2009-12-03 08:36:22 +0000 | [diff] [blame] | 12475 | 	for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12476 | 		unsigned char val = vpd_data[i]; | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12477 | 		unsigned int block_end; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12478 |  | 
 | 12479 | 		if (val == 0x82 || val == 0x91) { | 
 | 12480 | 			i = (i + 3 + | 
 | 12481 | 			     (vpd_data[i + 1] + | 
 | 12482 | 			      (vpd_data[i + 2] << 8))); | 
 | 12483 | 			continue; | 
 | 12484 | 		} | 
 | 12485 |  | 
 | 12486 | 		if (val != 0x90) | 
 | 12487 | 			goto out_not_found; | 
 | 12488 |  | 
 | 12489 | 		block_end = (i + 3 + | 
 | 12490 | 			     (vpd_data[i + 1] + | 
 | 12491 | 			      (vpd_data[i + 2] << 8))); | 
 | 12492 | 		i += 3; | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12493 |  | 
| Matt Carlson | 141518c | 2009-12-03 08:36:22 +0000 | [diff] [blame] | 12494 | 		if (block_end > TG3_NVM_VPD_LEN) | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12495 | 			goto out_not_found; | 
 | 12496 |  | 
 | 12497 | 		while (i < (block_end - 2)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12498 | 			if (vpd_data[i + 0] == 'P' && | 
 | 12499 | 			    vpd_data[i + 1] == 'N') { | 
 | 12500 | 				int partno_len = vpd_data[i + 2]; | 
 | 12501 |  | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12502 | 				i += 3; | 
| Matt Carlson | 141518c | 2009-12-03 08:36:22 +0000 | [diff] [blame] | 12503 | 				if (partno_len > TG3_BPN_SIZE || | 
 | 12504 | 				    (partno_len + i) > TG3_NVM_VPD_LEN) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12505 | 					goto out_not_found; | 
 | 12506 |  | 
 | 12507 | 				memcpy(tp->board_part_number, | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12508 | 				       &vpd_data[i], partno_len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12509 |  | 
 | 12510 | 				/* Success. */ | 
 | 12511 | 				return; | 
 | 12512 | 			} | 
| Michael Chan | af2c6a4 | 2006-11-07 14:57:51 -0800 | [diff] [blame] | 12513 | 			i += 3 + vpd_data[i + 2]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12514 | 		} | 
 | 12515 |  | 
 | 12516 | 		/* Part number not found. */ | 
 | 12517 | 		goto out_not_found; | 
 | 12518 | 	} | 
 | 12519 |  | 
 | 12520 | out_not_found: | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 12521 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
 | 12522 | 		strcpy(tp->board_part_number, "BCM95906"); | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 12523 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 
 | 12524 | 		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) | 
 | 12525 | 		strcpy(tp->board_part_number, "BCM57780"); | 
 | 12526 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 
 | 12527 | 		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) | 
 | 12528 | 		strcpy(tp->board_part_number, "BCM57760"); | 
 | 12529 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 
 | 12530 | 		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) | 
 | 12531 | 		strcpy(tp->board_part_number, "BCM57790"); | 
| Matt Carlson | 5e7ccf2 | 2009-08-25 10:08:42 +0000 | [diff] [blame] | 12532 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 
 | 12533 | 		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) | 
 | 12534 | 		strcpy(tp->board_part_number, "BCM57788"); | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 12535 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
 | 12536 | 		strcpy(tp->board_part_number, "BCM57765"); | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 12537 | 	else | 
 | 12538 | 		strcpy(tp->board_part_number, "none"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12539 | } | 
 | 12540 |  | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12541 | static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) | 
 | 12542 | { | 
 | 12543 | 	u32 val; | 
 | 12544 |  | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12545 | 	if (tg3_nvram_read(tp, offset, &val) || | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12546 | 	    (val & 0xfc000000) != 0x0c000000 || | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12547 | 	    tg3_nvram_read(tp, offset + 4, &val) || | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12548 | 	    val != 0) | 
 | 12549 | 		return 0; | 
 | 12550 |  | 
 | 12551 | 	return 1; | 
 | 12552 | } | 
 | 12553 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12554 | static void __devinit tg3_read_bc_ver(struct tg3 *tp) | 
 | 12555 | { | 
| Matt Carlson | ff3a7cb | 2009-02-25 14:26:58 +0000 | [diff] [blame] | 12556 | 	u32 val, offset, start, ver_offset; | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12557 | 	int i; | 
| Matt Carlson | ff3a7cb | 2009-02-25 14:26:58 +0000 | [diff] [blame] | 12558 | 	bool newver = false; | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12559 |  | 
 | 12560 | 	if (tg3_nvram_read(tp, 0xc, &offset) || | 
 | 12561 | 	    tg3_nvram_read(tp, 0x4, &start)) | 
 | 12562 | 		return; | 
 | 12563 |  | 
 | 12564 | 	offset = tg3_nvram_logical_addr(tp, offset); | 
 | 12565 |  | 
| Matt Carlson | ff3a7cb | 2009-02-25 14:26:58 +0000 | [diff] [blame] | 12566 | 	if (tg3_nvram_read(tp, offset, &val)) | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12567 | 		return; | 
 | 12568 |  | 
| Matt Carlson | ff3a7cb | 2009-02-25 14:26:58 +0000 | [diff] [blame] | 12569 | 	if ((val & 0xfc000000) == 0x0c000000) { | 
 | 12570 | 		if (tg3_nvram_read(tp, offset + 4, &val)) | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12571 | 			return; | 
 | 12572 |  | 
| Matt Carlson | ff3a7cb | 2009-02-25 14:26:58 +0000 | [diff] [blame] | 12573 | 		if (val == 0) | 
 | 12574 | 			newver = true; | 
 | 12575 | 	} | 
 | 12576 |  | 
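 |  | 	/* Newer bootcode images embed a printable version string; older | 
 |  | 	 * images only provide a major/minor revision word. | 
 |  | 	 */ | 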
 | 12577 | 	if (newver) { | 
 | 12578 | 		if (tg3_nvram_read(tp, offset + 8, &ver_offset)) | 
 | 12579 | 			return; | 
 | 12580 |  | 
 | 12581 | 		offset = offset + ver_offset - start; | 
 | 12582 | 		for (i = 0; i < 16; i += 4) { | 
 | 12583 | 			__be32 v; | 
 | 12584 | 			if (tg3_nvram_read_be32(tp, offset + i, &v)) | 
 | 12585 | 				return; | 
 | 12586 |  | 
 | 12587 | 			memcpy(tp->fw_ver + i, &v, sizeof(v)); | 
 | 12588 | 		} | 
 | 12589 | 	} else { | 
 | 12590 | 		u32 major, minor; | 
 | 12591 |  | 
 | 12592 | 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) | 
 | 12593 | 			return; | 
 | 12594 |  | 
 | 12595 | 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> | 
 | 12596 | 			TG3_NVM_BCVER_MAJSFT; | 
 | 12597 | 		minor = ver_offset & TG3_NVM_BCVER_MINMSK; | 
 | 12598 | 		snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor); | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12599 | 	} | 
 | 12600 | } | 
 | 12601 |  | 
| Matt Carlson | a6f6cb1 | 2009-02-25 14:27:43 +0000 | [diff] [blame] | 12602 | static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) | 
 | 12603 | { | 
 | 12604 | 	u32 val, major, minor; | 
 | 12605 |  | 
 | 12606 | 	/* Use native endian representation */ | 
 | 12607 | 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) | 
 | 12608 | 		return; | 
 | 12609 |  | 
 | 12610 | 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> | 
 | 12611 | 		TG3_NVM_HWSB_CFG1_MAJSFT; | 
 | 12612 | 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> | 
 | 12613 | 		TG3_NVM_HWSB_CFG1_MINSFT; | 
 | 12614 |  | 
 | 12615 | 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); | 
 | 12616 | } | 
 | 12617 |  | 
| Matt Carlson | dfe00d7 | 2008-11-21 17:19:41 -0800 | [diff] [blame] | 12618 | static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) | 
 | 12619 | { | 
 | 12620 | 	u32 offset, major, minor, build; | 
 | 12621 |  | 
 | 12622 | 	tp->fw_ver[0] = 's'; | 
 | 12623 | 	tp->fw_ver[1] = 'b'; | 
 | 12624 | 	tp->fw_ver[2] = '\0'; | 
 | 12625 |  | 
 | 12626 | 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) | 
 | 12627 | 		return; | 
 | 12628 |  | 
 | 12629 | 	switch (val & TG3_EEPROM_SB_REVISION_MASK) { | 
 | 12630 | 	case TG3_EEPROM_SB_REVISION_0: | 
 | 12631 | 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF; | 
 | 12632 | 		break; | 
 | 12633 | 	case TG3_EEPROM_SB_REVISION_2: | 
 | 12634 | 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF; | 
 | 12635 | 		break; | 
 | 12636 | 	case TG3_EEPROM_SB_REVISION_3: | 
 | 12637 | 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF; | 
 | 12638 | 		break; | 
 | 12639 | 	default: | 
 | 12640 | 		return; | 
 | 12641 | 	} | 
 | 12642 |  | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12643 | 	if (tg3_nvram_read(tp, offset, &val)) | 
| Matt Carlson | dfe00d7 | 2008-11-21 17:19:41 -0800 | [diff] [blame] | 12644 | 		return; | 
 | 12645 |  | 
 | 12646 | 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> | 
 | 12647 | 		TG3_EEPROM_SB_EDH_BLD_SHFT; | 
 | 12648 | 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> | 
 | 12649 | 		TG3_EEPROM_SB_EDH_MAJ_SHFT; | 
 | 12650 | 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK; | 
 | 12651 |  | 
 | 12652 | 	if (minor > 99 || build > 26) | 
 | 12653 | 		return; | 
 | 12654 |  | 
 | 12655 | 	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor); | 
 | 12656 |  | 
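 |  | 	/* Non-zero builds get a letter suffix: build 1 is 'a', 2 is 'b', ... */ | 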
 | 12657 | 	if (build > 0) { | 
 | 12658 | 		tp->fw_ver[8] = 'a' + build - 1; | 
 | 12659 | 		tp->fw_ver[9] = '\0'; | 
 | 12660 | 	} | 
 | 12661 | } | 
 | 12662 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12663 | static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) | 
| Michael Chan | c4e6575 | 2006-03-20 22:29:32 -0800 | [diff] [blame] | 12664 | { | 
 | 12665 | 	u32 val, offset, start; | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12666 | 	int i, vlen; | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12667 |  | 
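 |  | 	/* Walk the NVRAM directory looking for the ASF init entry. */ | 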
 | 12668 | 	for (offset = TG3_NVM_DIR_START; | 
 | 12669 | 	     offset < TG3_NVM_DIR_END; | 
 | 12670 | 	     offset += TG3_NVM_DIRENT_SIZE) { | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12671 | 		if (tg3_nvram_read(tp, offset, &val)) | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12672 | 			return; | 
 | 12673 |  | 
 | 12674 | 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) | 
 | 12675 | 			break; | 
 | 12676 | 	} | 
 | 12677 |  | 
 | 12678 | 	if (offset == TG3_NVM_DIR_END) | 
 | 12679 | 		return; | 
 | 12680 |  | 
 | 12681 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 
 | 12682 | 		start = 0x08000000; | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12683 | 	else if (tg3_nvram_read(tp, offset - 4, &start)) | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12684 | 		return; | 
 | 12685 |  | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12686 | 	if (tg3_nvram_read(tp, offset + 4, &offset) || | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12687 | 	    !tg3_fw_img_is_valid(tp, offset) || | 
| Matt Carlson | e4f3411 | 2009-02-25 14:25:00 +0000 | [diff] [blame] | 12688 | 	    tg3_nvram_read(tp, offset + 8, &val)) | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12689 | 		return; | 
 | 12690 |  | 
 | 12691 | 	offset += val - start; | 
 | 12692 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12693 | 	vlen = strlen(tp->fw_ver); | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12694 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12695 | 	tp->fw_ver[vlen++] = ','; | 
 | 12696 | 	tp->fw_ver[vlen++] = ' '; | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12697 |  | 
 | 12698 | 	for (i = 0; i < 4; i++) { | 
| Matt Carlson | a9dc529 | 2009-02-25 14:25:30 +0000 | [diff] [blame] | 12699 | 		__be32 v; | 
 | 12700 | 		if (tg3_nvram_read_be32(tp, offset, &v)) | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12701 | 			return; | 
 | 12702 |  | 
| Al Viro | b9fc7dc | 2007-12-17 22:59:57 -0800 | [diff] [blame] | 12703 | 		offset += sizeof(v); | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12704 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12705 | 		if (vlen > TG3_VER_SIZE - sizeof(v)) { | 
 | 12706 | 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12707 | 			break; | 
 | 12708 | 		} | 
 | 12709 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12710 | 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); | 
 | 12711 | 		vlen += sizeof(v); | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12712 | 	} | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12713 | } | 
 | 12714 |  | 
| Matt Carlson | 7fd7644 | 2009-02-25 14:27:20 +0000 | [diff] [blame] | 12715 | static void __devinit tg3_read_dash_ver(struct tg3 *tp) | 
 | 12716 | { | 
 | 12717 | 	int vlen; | 
 | 12718 | 	u32 apedata; | 
 | 12719 |  | 
 | 12720 | 	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || | 
 | 12721 | 	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF)) | 
 | 12722 | 		return; | 
 | 12723 |  | 
 | 12724 | 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); | 
 | 12725 | 	if (apedata != APE_SEG_SIG_MAGIC) | 
 | 12726 | 		return; | 
 | 12727 |  | 
 | 12728 | 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); | 
 | 12729 | 	if (!(apedata & APE_FW_STATUS_READY)) | 
 | 12730 | 		return; | 
 | 12731 |  | 
 | 12732 | 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); | 
 | 12733 |  | 
 | 12734 | 	vlen = strlen(tp->fw_ver); | 
 | 12735 |  | 
 | 12736 | 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d", | 
 | 12737 | 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, | 
 | 12738 | 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, | 
 | 12739 | 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, | 
 | 12740 | 		 (apedata & APE_FW_VERSION_BLDMSK)); | 
 | 12741 | } | 
 | 12742 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12743 | static void __devinit tg3_read_fw_ver(struct tg3 *tp) | 
 | 12744 | { | 
 | 12745 | 	u32 val; | 
 | 12746 |  | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 12747 | 	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { | 
 | 12748 | 		tp->fw_ver[0] = 's'; | 
 | 12749 | 		tp->fw_ver[1] = 'b'; | 
 | 12750 | 		tp->fw_ver[2] = '\0'; | 
 | 12751 |  | 
 | 12752 | 		return; | 
 | 12753 | 	} | 
 | 12754 |  | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12755 | 	if (tg3_nvram_read(tp, 0, &val)) | 
 | 12756 | 		return; | 
 | 12757 |  | 
 | 12758 | 	if (val == TG3_EEPROM_MAGIC) | 
 | 12759 | 		tg3_read_bc_ver(tp); | 
 | 12760 | 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) | 
 | 12761 | 		tg3_read_sb_ver(tp, val); | 
| Matt Carlson | a6f6cb1 | 2009-02-25 14:27:43 +0000 | [diff] [blame] | 12762 | 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) | 
 | 12763 | 		tg3_read_hwsb_ver(tp); | 
| Matt Carlson | acd9c11 | 2009-02-25 14:26:33 +0000 | [diff] [blame] | 12764 | 	else | 
 | 12765 | 		return; | 
 | 12766 |  | 
 | 12767 | 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || | 
 | 12768 | 	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) | 
 | 12769 | 		return; | 
 | 12770 |  | 
 | 12771 | 	tg3_read_mgmtfw_ver(tp); | 
| Matt Carlson | 9c8a620 | 2007-10-21 16:16:08 -0700 | [diff] [blame] | 12772 |  | 
 | 12773 | 	tp->fw_ver[TG3_VER_SIZE - 1] = 0; | 
| Michael Chan | c4e6575 | 2006-03-20 22:29:32 -0800 | [diff] [blame] | 12774 | } | 
 | 12775 |  | 
| Michael Chan | 7544b09 | 2007-05-05 13:08:32 -0700 | [diff] [blame] | 12776 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | 
 | 12777 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12778 | static int __devinit tg3_get_invariants(struct tg3 *tp) | 
 | 12779 | { | 
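 |  | 	/* Host bridges known to reorder PCI writes; their presence triggers | 
 |  | 	 * a write-reordering workaround later in this function. | 
 |  | 	 */ | 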
 | 12780 | 	static struct pci_device_id write_reorder_chipsets[] = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12781 | 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, | 
 | 12782 | 		             PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | 
| John W. Linville | c165b00 | 2006-07-08 13:28:53 -0700 | [diff] [blame] | 12783 | 		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, | 
 | 12784 | 		             PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | 
| Michael Chan | 399de50 | 2005-10-03 14:02:39 -0700 | [diff] [blame] | 12785 | 		{ PCI_DEVICE(PCI_VENDOR_ID_VIA, | 
 | 12786 | 			     PCI_DEVICE_ID_VIA_8385_0) }, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12787 | 		{ }, | 
 | 12788 | 	}; | 
 | 12789 | 	u32 misc_ctrl_reg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12790 | 	u32 pci_state_reg, grc_misc_cfg; | 
 | 12791 | 	u32 val; | 
 | 12792 | 	u16 pci_cmd; | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 12793 | 	int err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12794 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12795 | 	/* Force memory write invalidate off.  If we leave it on, | 
 | 12796 | 	 * then on 5700_BX chips we have to enable a workaround. | 
 | 12797 | 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary | 
 | 12798 | 	 * to match the cacheline size.  The Broadcom driver has this | 
 | 12799 | 	 * workaround but turns MWI off all the time and so never uses | 
 | 12800 | 	 * it.  This seems to suggest that the workaround is insufficient. | 
 | 12801 | 	 */ | 
 | 12802 | 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 
 | 12803 | 	pci_cmd &= ~PCI_COMMAND_INVALIDATE; | 
 | 12804 | 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 
 | 12805 |  | 
 | 12806 | 	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL | 
 | 12807 | 	 * has the register indirect write enable bit set before | 
 | 12808 | 	 * we try to access any of the MMIO registers.  It is also | 
 | 12809 | 	 * critical that the PCI-X hw workaround situation is decided | 
 | 12810 | 	 * before that as well. | 
 | 12811 | 	 */ | 
 | 12812 | 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 
 | 12813 | 			      &misc_ctrl_reg); | 
 | 12814 |  | 
 | 12815 | 	tp->pci_chip_rev_id = (misc_ctrl_reg >> | 
 | 12816 | 			       MISC_HOST_CTRL_CHIPREV_SHIFT); | 
| Matt Carlson | 795d01c | 2007-10-07 23:28:17 -0700 | [diff] [blame] | 12817 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { | 
 | 12818 | 		u32 prod_id_asic_rev; | 
 | 12819 |  | 
| Matt Carlson | 5001e2f | 2009-11-13 13:03:51 +0000 | [diff] [blame] | 12820 | 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || | 
 | 12821 | 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || | 
 | 12822 | 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 12823 | 			pci_read_config_dword(tp->pdev, | 
 | 12824 | 					      TG3PCI_GEN2_PRODID_ASICREV, | 
 | 12825 | 					      &prod_id_asic_rev); | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 12826 | 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || | 
 | 12827 | 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || | 
 | 12828 | 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || | 
 | 12829 | 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || | 
 | 12830 | 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || | 
 | 12831 | 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) | 
 | 12832 | 			pci_read_config_dword(tp->pdev, | 
 | 12833 | 					      TG3PCI_GEN15_PRODID_ASICREV, | 
 | 12834 | 					      &prod_id_asic_rev); | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 12835 | 		else | 
 | 12836 | 			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, | 
 | 12837 | 					      &prod_id_asic_rev); | 
 | 12838 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 12839 | 		tp->pci_chip_rev_id = prod_id_asic_rev; | 
| Matt Carlson | 795d01c | 2007-10-07 23:28:17 -0700 | [diff] [blame] | 12840 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12841 |  | 
| Michael Chan | ff645be | 2005-04-21 17:09:53 -0700 | [diff] [blame] | 12842 | 	/* Wrong chip ID in 5752 A0. This code can be removed later | 
 | 12843 | 	 * as A0 is not in production. | 
 | 12844 | 	 */ | 
 | 12845 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) | 
 | 12846 | 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; | 
 | 12847 |  | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 12848 | 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets, | 
 | 12849 | 	 * we need to disable memory and use config. cycles | 
 | 12850 | 	 * only to access all registers. The 5702/03 chips | 
 | 12851 | 	 * can mistakenly decode the special cycles from the | 
 | 12852 | 	 * ICH chipsets as memory write cycles, causing corruption | 
 | 12853 | 	 * of register and memory space. Only certain ICH bridges | 
 | 12854 | 	 * will drive special cycles with non-zero data during the | 
 | 12855 | 	 * address phase which can fall within the 5703's address | 
 | 12856 | 	 * range. This is not an ICH bug as the PCI spec allows | 
 | 12857 | 	 * non-zero address during special cycles. However, only | 
 | 12858 | 	 * these ICH bridges are known to drive non-zero addresses | 
 | 12859 | 	 * during special cycles. | 
 | 12860 | 	 * | 
 | 12861 | 	 * Since special cycles do not cross PCI bridges, we only | 
 | 12862 | 	 * enable this workaround if the 5703 is on the secondary | 
 | 12863 | 	 * bus of these ICH bridges. | 
 | 12864 | 	 */ | 
 | 12865 | 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || | 
 | 12866 | 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { | 
 | 12867 | 		static struct tg3_dev_id { | 
 | 12868 | 			u32	vendor; | 
 | 12869 | 			u32	device; | 
 | 12870 | 			u32	rev; | 
 | 12871 | 		} ich_chipsets[] = { | 
 | 12872 | 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, | 
 | 12873 | 			  PCI_ANY_ID }, | 
 | 12874 | 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, | 
 | 12875 | 			  PCI_ANY_ID }, | 
 | 12876 | 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, | 
 | 12877 | 			  0xa }, | 
 | 12878 | 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, | 
 | 12879 | 			  PCI_ANY_ID }, | 
 | 12880 | 			{ }, | 
 | 12881 | 		}; | 
 | 12882 | 		struct tg3_dev_id *pci_id = &ich_chipsets[0]; | 
 | 12883 | 		struct pci_dev *bridge = NULL; | 
 | 12884 |  | 
 | 12885 | 		while (pci_id->vendor != 0) { | 
 | 12886 | 			bridge = pci_get_device(pci_id->vendor, pci_id->device, | 
 | 12887 | 						bridge); | 
 | 12888 | 			if (!bridge) { | 
 | 12889 | 				pci_id++; | 
 | 12890 | 				continue; | 
 | 12891 | 			} | 
 | 12892 | 			if (pci_id->rev != PCI_ANY_ID) { | 
| Auke Kok | 44c1013 | 2007-06-08 15:46:36 -0700 | [diff] [blame] | 12893 | 				if (bridge->revision > pci_id->rev) | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 12894 | 					continue; | 
 | 12895 | 			} | 
 | 12896 | 			if (bridge->subordinate && | 
 | 12897 | 			    (bridge->subordinate->number == | 
 | 12898 | 			     tp->pdev->bus->number)) { | 
 | 12899 |  | 
 | 12900 | 				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND; | 
 | 12901 | 				pci_dev_put(bridge); | 
 | 12902 | 				break; | 
 | 12903 | 			} | 
 | 12904 | 		} | 
 | 12905 | 	} | 
 | 12906 |  | 
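 |  | 	/* 5701 devices sitting behind certain Intel PXH bridges need the | 
 |  | 	 * 5701 DMA bug workaround. | 
 |  | 	 */ | 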
| Matt Carlson | 41588ba | 2008-04-19 18:12:33 -0700 | [diff] [blame] | 12907 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { | 
 | 12908 | 		static struct tg3_dev_id { | 
 | 12909 | 			u32	vendor; | 
 | 12910 | 			u32	device; | 
 | 12911 | 		} bridge_chipsets[] = { | 
 | 12912 | 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, | 
 | 12913 | 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, | 
 | 12914 | 			{ }, | 
 | 12915 | 		}; | 
 | 12916 | 		struct tg3_dev_id *pci_id = &bridge_chipsets[0]; | 
 | 12917 | 		struct pci_dev *bridge = NULL; | 
 | 12918 |  | 
 | 12919 | 		while (pci_id->vendor != 0) { | 
 | 12920 | 			bridge = pci_get_device(pci_id->vendor, | 
 | 12921 | 						pci_id->device, | 
 | 12922 | 						bridge); | 
 | 12923 | 			if (!bridge) { | 
 | 12924 | 				pci_id++; | 
 | 12925 | 				continue; | 
 | 12926 | 			} | 
 | 12927 | 			if (bridge->subordinate && | 
 | 12928 | 			    (bridge->subordinate->number <= | 
 | 12929 | 			     tp->pdev->bus->number) && | 
 | 12930 | 			    (bridge->subordinate->subordinate >= | 
 | 12931 | 			     tp->pdev->bus->number)) { | 
 | 12932 | 				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG; | 
 | 12933 | 				pci_dev_put(bridge); | 
 | 12934 | 				break; | 
 | 12935 | 			} | 
 | 12936 | 		} | 
 | 12937 | 	} | 
 | 12938 |  | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 12939 | 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support | 
 | 12940 | 	 * DMA addresses > 40-bit. This bridge may have additional | 
 | 12941 | 	 * 57xx devices behind it in some 4-port NIC designs for example. | 
 | 12942 | 	 * Any tg3 device found behind the bridge will also need the 40-bit | 
 | 12943 | 	 * DMA workaround. | 
 | 12944 | 	 */ | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 12945 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || | 
 | 12946 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 
 | 12947 | 		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 12948 | 		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 12949 | 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 12950 | 	} | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 12951 | 	else { | 
 | 12952 | 		struct pci_dev *bridge = NULL; | 
 | 12953 |  | 
 | 12954 | 		do { | 
 | 12955 | 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, | 
 | 12956 | 						PCI_DEVICE_ID_SERVERWORKS_EPB, | 
 | 12957 | 						bridge); | 
 | 12958 | 			if (bridge && bridge->subordinate && | 
 | 12959 | 			    (bridge->subordinate->number <= | 
 | 12960 | 			     tp->pdev->bus->number) && | 
 | 12961 | 			    (bridge->subordinate->subordinate >= | 
 | 12962 | 			     tp->pdev->bus->number)) { | 
 | 12963 | 				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; | 
 | 12964 | 				pci_dev_put(bridge); | 
 | 12965 | 				break; | 
 | 12966 | 			} | 
 | 12967 | 		} while (bridge); | 
 | 12968 | 	} | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 12969 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12970 | 	/* Initialize misc host control in PCI block. */ | 
 | 12971 | 	tp->misc_host_ctrl |= (misc_ctrl_reg & | 
 | 12972 | 			       MISC_HOST_CTRL_CHIPREV); | 
 | 12973 | 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 
 | 12974 | 			       tp->misc_host_ctrl); | 
 | 12975 |  | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 12976 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || | 
 | 12977 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || | 
 | 12978 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 
| Michael Chan | 7544b09 | 2007-05-05 13:08:32 -0700 | [diff] [blame] | 12979 | 		tp->pdev_peer = tg3_find_peer(tp); | 
 | 12980 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 12981 | 	/* Intentionally exclude ASIC_REV_5906 */ | 
 | 12982 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 12983 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 12984 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 12985 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 12986 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 12987 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 12988 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 12989 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 12990 | 		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; | 
 | 12991 |  | 
 | 12992 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 
 | 12993 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 12994 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 12995 | 	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 12996 | 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 
| John W. Linville | 6708e5c | 2005-04-21 17:00:52 -0700 | [diff] [blame] | 12997 | 		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; | 
 | 12998 |  | 
| John W. Linville | 1b440c56 | 2005-04-21 17:03:18 -0700 | [diff] [blame] | 12999 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || | 
 | 13000 | 	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 
 | 13001 | 		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; | 
 | 13002 |  | 
| Matt Carlson | 027455a | 2008-12-21 20:19:30 -0800 | [diff] [blame] | 13003 | 	/* 5700 B0 chips do not support checksumming correctly due | 
 | 13004 | 	 * to hardware bugs. | 
 | 13005 | 	 */ | 
 | 13006 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) | 
 | 13007 | 		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS; | 
 | 13008 | 	else { | 
 | 13009 | 		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | 
 | 13010 | 		tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 
 | 13011 | 		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 
 | 13012 | 			tp->dev->features |= NETIF_F_IPV6_CSUM; | 
 | 13013 | 	} | 
 | 13014 |  | 
| Matt Carlson | 507399f | 2009-11-13 13:03:37 +0000 | [diff] [blame] | 13015 | 	/* Determine TSO capabilities */ | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13016 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13017 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 13018 | 		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; | 
 | 13019 | 	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 
 | 13020 | 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
| Matt Carlson | 507399f | 2009-11-13 13:03:37 +0000 | [diff] [blame] | 13021 | 		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | 
 | 13022 | 	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 
 | 13023 | 		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | 
 | 13024 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && | 
 | 13025 | 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | 
 | 13026 | 			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | 
 | 13027 | 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 
 | 13028 | 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | 
 | 13029 | 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { | 
 | 13030 | 		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; | 
 | 13031 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) | 
 | 13032 | 			tp->fw_needed = FIRMWARE_TG3TSO5; | 
 | 13033 | 		else | 
 | 13034 | 			tp->fw_needed = FIRMWARE_TG3TSO; | 
 | 13035 | 	} | 
 | 13036 |  | 
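	/* Interrupt setup: default to a single vector.  5750 and later
	 * parts advertise MSI (with exceptions cleared below), and
	 * 5717/57765 additionally support MSI-X with multiple vectors.
	 */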
 | 13037 | 	tp->irq_max = 1; | 
 | 13038 |  | 
| Michael Chan | 5a6f307 | 2006-03-20 22:28:05 -0800 | [diff] [blame] | 13039 | 	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 
| Michael Chan | 7544b09 | 2007-05-05 13:08:32 -0700 | [diff] [blame] | 13040 | 		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; | 
 | 13041 | 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || | 
 | 13042 | 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || | 
 | 13043 | 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && | 
 | 13044 | 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && | 
 | 13045 | 		     tp->pdev_peer == tp->pdev)) | 
 | 13046 | 			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; | 
 | 13047 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13048 | 		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 13049 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
| Michael Chan | fcfa0a3 | 2006-03-20 22:28:41 -0800 | [diff] [blame] | 13050 | 			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; | 
| Michael Chan | 52c0fd8 | 2006-06-29 20:15:54 -0700 | [diff] [blame] | 13051 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13052 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13053 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13054 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | 
| Matt Carlson | 507399f | 2009-11-13 13:03:37 +0000 | [diff] [blame] | 13055 | 			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; | 
 | 13056 | 			tp->irq_max = TG3_IRQ_MAX_VECS; | 
 | 13057 | 		} | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 13058 | 	} | 
| Matt Carlson | 0e1406d | 2009-11-02 12:33:33 +0000 | [diff] [blame] | 13059 |  | 
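	/* Record DMA engine errata: 5717 and 5906 need the short-DMA
	 * workaround, while the remaining pre-5755 parts are flagged for
	 * the 4GB-boundary and 40-bit DMA limit bugs.
	 */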
| Matt Carlson | 615774f | 2009-11-13 13:03:39 +0000 | [diff] [blame] | 13060 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13061 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
 | 13062 | 		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; | 
 | 13063 | 	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { | 
 | 13064 | 		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; | 
 | 13065 | 		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; | 
| Matt Carlson | 0e1406d | 2009-11-02 12:33:33 +0000 | [diff] [blame] | 13066 | 	} | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 13067 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13068 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13069 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
 | 13070 | 		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; | 
 | 13071 |  | 
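	/* Jumbo frames are supported on pre-5705 chips, 5780-class chips,
	 * and chips that use the jumbo BD flag.
	 */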
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 13072 | 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 13073 | 	     (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13074 | 		 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) | 
| Matt Carlson | 8f666b0 | 2009-08-28 13:58:24 +0000 | [diff] [blame] | 13075 | 		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; | 
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 13076 |  | 
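	/* Determine the host bus type (PCI Express, PCI-X, or plain PCI)
	 * from the PCI state register and the capability list.
	 */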
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 13077 | 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 
 | 13078 | 			      &pci_state_reg); | 
 | 13079 |  | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 13080 | 	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); | 
 | 13081 | 	if (tp->pcie_cap != 0) { | 
 | 13082 | 		u16 lnkctl; | 
 | 13083 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13084 | 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; | 
| Matt Carlson | 5f5c51e | 2007-11-12 21:19:37 -0800 | [diff] [blame] | 13085 |  | 
 | 13086 | 		pcie_set_readrq(tp->pdev, 4096); | 
 | 13087 |  | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 13088 | 		pci_read_config_word(tp->pdev, | 
 | 13089 | 				     tp->pcie_cap + PCI_EXP_LNKCTL, | 
 | 13090 | 				     &lnkctl); | 
 | 13091 | 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { | 
 | 13092 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
| Michael Chan | c7835a7 | 2006-11-15 21:14:42 -0800 | [diff] [blame] | 13093 | 				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 13094 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13095 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 
| Matt Carlson | 9cf74eb | 2009-04-20 06:58:27 +0000 | [diff] [blame] | 13096 | 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || | 
 | 13097 | 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) | 
| Matt Carlson | 5e7dfd0 | 2008-11-21 17:18:16 -0800 | [diff] [blame] | 13098 | 				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG; | 
| Michael Chan | c7835a7 | 2006-11-15 21:14:42 -0800 | [diff] [blame] | 13099 | 		} | 
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 13100 | 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { | 
| Matt Carlson | fcb389d | 2008-11-03 16:55:44 -0800 | [diff] [blame] | 13101 | 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; | 
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 13102 | 	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 
 | 13103 | 		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 
 | 13104 | 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); | 
 | 13105 | 		if (!tp->pcix_cap) { | 
 | 13106 | 			printk(KERN_ERR PFX "Cannot find PCI-X " | 
 | 13107 | 					    "capability, aborting.\n"); | 
 | 13108 | 			return -EIO; | 
 | 13109 | 		} | 
 | 13110 |  | 
 | 13111 | 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) | 
 | 13112 | 			tp->tg3_flags |= TG3_FLAG_PCIX_MODE; | 
 | 13113 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13114 |  | 
| Michael Chan | 399de50 | 2005-10-03 14:02:39 -0700 | [diff] [blame] | 13115 | 	/* If we have an AMD 762 or VIA K8T800 chipset, write | 
 | 13116 | 	 * reordering to the mailbox registers done by the host | 
 | 13117 | 	 * controller can cause major troubles.  We read back from | 
 | 13118 | 	 * every mailbox register write to force the writes to be | 
 | 13119 | 	 * posted to the chip in order. | 
 | 13120 | 	 */ | 
 | 13121 | 	if (pci_dev_present(write_reorder_chipsets) && | 
 | 13122 | 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 
 | 13123 | 		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; | 
 | 13124 |  | 
| Matt Carlson | 69fc405 | 2008-12-21 20:19:57 -0800 | [diff] [blame] | 13125 | 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, | 
 | 13126 | 			     &tp->pci_cacheline_sz); | 
 | 13127 | 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, | 
 | 13128 | 			     &tp->pci_lat_timer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13129 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && | 
 | 13130 | 	    tp->pci_lat_timer < 64) { | 
 | 13131 | 		tp->pci_lat_timer = 64; | 
| Matt Carlson | 69fc405 | 2008-12-21 20:19:57 -0800 | [diff] [blame] | 13132 | 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, | 
 | 13133 | 				      tp->pci_lat_timer); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13134 | 	} | 
 | 13135 |  | 
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 13136 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { | 
 | 13137 | 		/* 5700 BX chips need to have their TX producer index | 
 | 13138 | 		 * mailboxes written twice to work around a bug. | 
 | 13139 | 		 */ | 
 | 13140 | 		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG; | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 13141 |  | 
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 13142 | 		/* If we are in PCI-X mode, enable register write workaround. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13143 | 		 * | 
 | 13144 | 		 * The workaround is to use indirect register accesses | 
 | 13145 | 		 * for all chip writes except those to mailbox registers. | 
 | 13146 | 		 */ | 
| Matt Carlson | 52f4490 | 2008-11-21 17:17:04 -0800 | [diff] [blame] | 13147 | 		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13148 | 			u32 pm_reg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13149 |  | 
 | 13150 | 			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; | 
 | 13151 |  | 
 | 13152 | 			/* The chip can have its power management PCI config | 
 | 13153 | 			 * space registers clobbered due to this bug. | 
 | 13154 | 			 * So explicitly force the chip into D0 here. | 
 | 13155 | 			 */ | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 13156 | 			pci_read_config_dword(tp->pdev, | 
 | 13157 | 					      tp->pm_cap + PCI_PM_CTRL, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13158 | 					      &pm_reg); | 
 | 13159 | 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK; | 
 | 13160 | 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; | 
| Matt Carlson | 9974a35 | 2007-10-07 23:27:28 -0700 | [diff] [blame] | 13161 | 			pci_write_config_dword(tp->pdev, | 
 | 13162 | 					       tp->pm_cap + PCI_PM_CTRL, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13163 | 					       pm_reg); | 
 | 13164 |  | 
 | 13165 | 			/* Also, force SERR#/PERR# in PCI command. */ | 
 | 13166 | 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 
 | 13167 | 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; | 
 | 13168 | 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 
 | 13169 | 		} | 
 | 13170 | 	} | 
 | 13171 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13172 | 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) | 
 | 13173 | 		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; | 
 | 13174 | 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) | 
 | 13175 | 		tp->tg3_flags |= TG3_FLAG_PCI_32BIT; | 
 | 13176 |  | 
 | 13177 | 	/* Chip-specific fixup from Broadcom driver */ | 
 | 13178 | 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) && | 
 | 13179 | 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { | 
 | 13180 | 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA; | 
 | 13181 | 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); | 
 | 13182 | 	} | 
 | 13183 |  | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 13184 | 	/* Default fast path register access methods */ | 
| Michael Chan | 2009493 | 2005-08-09 20:16:32 -0700 | [diff] [blame] | 13185 | 	tp->read32 = tg3_read32; | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 13186 | 	tp->write32 = tg3_write32; | 
| Michael Chan | 09ee929 | 2005-08-09 20:17:00 -0700 | [diff] [blame] | 13187 | 	tp->read32_mbox = tg3_read32; | 
| Michael Chan | 2009493 | 2005-08-09 20:16:32 -0700 | [diff] [blame] | 13188 | 	tp->write32_mbox = tg3_write32; | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 13189 | 	tp->write32_tx_mbox = tg3_write32; | 
 | 13190 | 	tp->write32_rx_mbox = tg3_write32; | 
 | 13191 |  | 
 | 13192 | 	/* Various workaround register access methods */ | 
 | 13193 | 	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) | 
 | 13194 | 		tp->write32 = tg3_write_indirect_reg32; | 
| Matt Carlson | 98efd8a | 2007-05-05 12:47:25 -0700 | [diff] [blame] | 13195 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || | 
 | 13196 | 		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 
 | 13197 | 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) { | 
 | 13198 | 		/* | 
 | 13199 | 		 * Back-to-back register writes can cause problems on these | 
 | 13200 | 		 * chips; the workaround is to read back all register writes | 
 | 13201 | 		 * except those to mailbox regs. | 
 | 13202 | 		 * | 
 | 13203 | 		 * See tg3_write_indirect_reg32(). | 
 | 13204 | 		 */ | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 13205 | 		tp->write32 = tg3_write_flush_reg32; | 
| Matt Carlson | 98efd8a | 2007-05-05 12:47:25 -0700 | [diff] [blame] | 13206 | 	} | 
 | 13207 |  | 
| Michael Chan | 1ee582d | 2005-08-09 20:16:46 -0700 | [diff] [blame] | 13208 | 	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) || | 
 | 13209 | 	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) { | 
 | 13210 | 		tp->write32_tx_mbox = tg3_write32_tx_mbox; | 
 | 13211 | 		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) | 
 | 13212 | 			tp->write32_rx_mbox = tg3_write_flush_reg32; | 
 | 13213 | 	} | 
| Michael Chan | 2009493 | 2005-08-09 20:16:32 -0700 | [diff] [blame] | 13214 |  | 
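	/* ICH workaround: route every register and mailbox access through
	 * indirect PCI config cycles, release the MMIO mapping, and turn
	 * off memory-space decoding on the device.
	 */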
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 13215 | 	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) { | 
 | 13216 | 		tp->read32 = tg3_read_indirect_reg32; | 
 | 13217 | 		tp->write32 = tg3_write_indirect_reg32; | 
 | 13218 | 		tp->read32_mbox = tg3_read_indirect_mbox; | 
 | 13219 | 		tp->write32_mbox = tg3_write_indirect_mbox; | 
 | 13220 | 		tp->write32_tx_mbox = tg3_write_indirect_mbox; | 
 | 13221 | 		tp->write32_rx_mbox = tg3_write_indirect_mbox; | 
 | 13222 |  | 
 | 13223 | 		iounmap(tp->regs); | 
| Peter Hagervall | 22abe31 | 2005-09-16 17:01:03 -0700 | [diff] [blame] | 13224 | 		tp->regs = NULL; | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 13225 |  | 
 | 13226 | 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); | 
 | 13227 | 		pci_cmd &= ~PCI_COMMAND_MEMORY; | 
 | 13228 | 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); | 
 | 13229 | 	} | 
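	/* The 5906 uses dedicated mailbox access helpers. */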
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 13230 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 13231 | 		tp->read32_mbox = tg3_read32_mbox_5906; | 
 | 13232 | 		tp->write32_mbox = tg3_write32_mbox_5906; | 
 | 13233 | 		tp->write32_tx_mbox = tg3_write32_mbox_5906; | 
 | 13234 | 		tp->write32_rx_mbox = tg3_write32_mbox_5906; | 
 | 13235 | 	} | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 13236 |  | 
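	/* When register writes must go through config space, or on
	 * 5700/5701 in PCI-X mode, SRAM is accessed through config
	 * space as well.
	 */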
| Michael Chan | bbadf50 | 2006-04-06 21:46:34 -0700 | [diff] [blame] | 13237 | 	if (tp->write32 == tg3_write_indirect_reg32 || | 
 | 13238 | 	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 
 | 13239 | 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 13240 | 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) | 
| Michael Chan | bbadf50 | 2006-04-06 21:46:34 -0700 | [diff] [blame] | 13241 | 		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG; | 
 | 13242 |  | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 13243 | 	/* Get eeprom hw config before calling tg3_set_power_state(). | 
| Michael Chan | 9d26e21 | 2006-12-07 00:21:14 -0800 | [diff] [blame] | 13244 | 	 * In particular, the TG3_FLG2_IS_NIC flag must be | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 13245 | 	 * determined before calling tg3_set_power_state() so that | 
 | 13246 | 	 * we know whether or not to switch out of Vaux power. | 
 | 13247 | 	 * When the flag is set, it means that GPIO1 is used for eeprom | 
 | 13248 | 	 * write protect and also implies that it is a LOM where GPIOs | 
 | 13249 | 	 * are not used to switch power. | 
| Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 13250 | 	 */ | 
| Michael Chan | 7d0c41e | 2005-04-21 17:06:20 -0700 | [diff] [blame] | 13251 | 	tg3_get_eeprom_hw_cfg(tp); | 
 | 13252 |  | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 13253 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 
 | 13254 | 		/* Allow reads and writes to the | 
 | 13255 | 		 * APE register and memory space. | 
 | 13256 | 		 */ | 
 | 13257 | 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | | 
 | 13258 | 				 PCISTATE_ALLOW_APE_SHMEM_WR; | 
 | 13259 | 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, | 
 | 13260 | 				       pci_state_reg); | 
 | 13261 | 	} | 
 | 13262 |  | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 13263 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 13264 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13265 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 13266 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13267 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13268 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 13269 | 		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; | 
 | 13270 |  | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 13271 | 	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). | 
 | 13272 | 	 * GPIO1 driven high will bring 5700's external PHY out of reset. | 
 | 13273 | 	 * It is also used as eeprom write protect on LOMs. | 
 | 13274 | 	 */ | 
 | 13275 | 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; | 
 | 13276 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || | 
 | 13277 | 	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) | 
 | 13278 | 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | | 
 | 13279 | 				       GRC_LCLCTRL_GPIO_OUTPUT1); | 
| Michael Chan | 3e7d83b | 2005-04-21 17:10:36 -0700 | [diff] [blame] | 13280 | 	/* Unused GPIO3 must be driven as output on 5752 because there | 
 | 13281 | 	 * are no pull-up resistors on unused GPIO pins. | 
 | 13282 | 	 */ | 
 | 13283 | 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 
 | 13284 | 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; | 
| Michael Chan | 314fba3 | 2005-04-21 17:07:04 -0700 | [diff] [blame] | 13285 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13286 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 
 | 13287 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 
| Michael Chan | af36e6b | 2006-03-23 01:28:06 -0800 | [diff] [blame] | 13288 | 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 
 | 13289 |  | 
| Matt Carlson | 8d519ab | 2009-04-20 06:58:01 +0000 | [diff] [blame] | 13290 | 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || | 
 | 13291 | 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { | 
| Matt Carlson | 5f0c4a3 | 2008-06-09 15:41:12 -0700 | [diff] [blame] | 13292 | 		/* Turn off the debug UART. */ | 
 | 13293 | 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; | 
 | 13294 | 		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC) | 
 | 13295 | 			/* Keep VMain power. */ | 
 | 13296 | 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | | 
 | 13297 | 					      GRC_LCLCTRL_GPIO_OUTPUT0; | 
 | 13298 | 	} | 
 | 13299 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13300 | 	/* Force the chip into D0. */ | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 13301 | 	err = tg3_set_power_state(tp, PCI_D0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13302 | 	if (err) { | 
 | 13303 | 		printk(KERN_ERR PFX "(%s) transition to D0 failed\n", | 
 | 13304 | 		       pci_name(tp->pdev)); | 
 | 13305 | 		return err; | 
 | 13306 | 	} | 
 | 13307 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13308 | 	/* Derive initial jumbo mode from MTU assigned in | 
 | 13309 | 	 * ether_setup() via the alloc_etherdev() call | 
 | 13310 | 	 */ | 
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 13311 | 	if (tp->dev->mtu > ETH_DATA_LEN && | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 13312 | 	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 
| Michael Chan | 0f893dc | 2005-07-25 12:30:38 -0700 | [diff] [blame] | 13313 | 		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13314 |  | 
 | 13315 | 	/* Determine WakeOnLan speed to use. */ | 
 | 13316 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 13317 | 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || | 
 | 13318 | 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || | 
 | 13319 | 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { | 
 | 13320 | 		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB); | 
 | 13321 | 	} else { | 
 | 13322 | 		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; | 
 | 13323 | 	} | 
 | 13324 |  | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 13325 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
 | 13326 | 		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; | 
 | 13327 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13328 | 	/* A few boards don't want the Ethernet@WireSpeed phy feature */ | 
 | 13329 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) || | 
 | 13330 | 	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && | 
 | 13331 | 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 13332 | 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 13333 | 	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) || | 
| Michael Chan | 747e8f8 | 2005-07-25 12:33:22 -0700 | [diff] [blame] | 13334 | 	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13335 | 		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED; | 
 | 13336 |  | 
 | 13337 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || | 
 | 13338 | 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) | 
 | 13339 | 		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG; | 
 | 13340 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) | 
 | 13341 | 		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; | 
 | 13342 |  | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13343 | 	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 13344 | 	    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13345 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 13346 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13347 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 
 | 13348 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | 
| Michael Chan | c424cb2 | 2006-04-29 18:56:34 -0700 | [diff] [blame] | 13349 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 13350 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 13351 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 
 | 13352 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { | 
| Michael Chan | d4011ad | 2007-02-13 12:17:25 -0800 | [diff] [blame] | 13353 | 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && | 
 | 13354 | 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) | 
 | 13355 | 				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; | 
| Michael Chan | c1d2a19 | 2007-01-08 19:57:20 -0800 | [diff] [blame] | 13356 | 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) | 
 | 13357 | 				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13358 | 		} else | 
| Michael Chan | c424cb2 | 2006-04-29 18:56:34 -0700 | [diff] [blame] | 13359 | 			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; | 
 | 13360 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13361 |  | 
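	/* On 5784 (non-AX revisions), pick up PHY tuning values from OTP,
	 * falling back to defaults if the OTP read comes back empty.
	 */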
| Matt Carlson | b2a5c19 | 2008-04-03 21:44:44 -0700 | [diff] [blame] | 13362 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 
 | 13363 | 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { | 
 | 13364 | 		tp->phy_otp = tg3_read_otp_phycfg(tp); | 
 | 13365 | 		if (tp->phy_otp == 0) | 
 | 13366 | 			tp->phy_otp = TG3_OTP_DEFAULT; | 
 | 13367 | 	} | 
 | 13368 |  | 
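	/* Chips with a CPMU run the MI (MDIO) interface with a constant
	 * 500 kHz clock.
	 */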
| Matt Carlson | f51f356 | 2008-05-25 23:45:08 -0700 | [diff] [blame] | 13369 | 	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) | 
| Matt Carlson | 8ef2142 | 2008-05-02 16:47:53 -0700 | [diff] [blame] | 13370 | 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; | 
 | 13371 | 	else | 
 | 13372 | 		tp->mi_mode = MAC_MI_MODE_BASE; | 
 | 13373 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13374 | 	tp->coalesce_mode = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13375 | 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && | 
 | 13376 | 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) | 
 | 13377 | 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE; | 
 | 13378 |  | 
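	/* 5785 and 57780 manage their PHYs through the kernel phylib. */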
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13379 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
 | 13380 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 13381 | 		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; | 
 | 13382 |  | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 13383 | 	err = tg3_mdio_init(tp); | 
 | 13384 | 	if (err) | 
 | 13385 | 		return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13386 |  | 
 | 13387 | 	/* Initialize data/descriptor byte/word swapping. */ | 
 | 13388 | 	val = tr32(GRC_MODE); | 
 | 13389 | 	val &= GRC_MODE_HOST_STACKUP; | 
 | 13390 | 	tw32(GRC_MODE, val | tp->grc_mode); | 
 | 13391 |  | 
 | 13392 | 	tg3_switch_clocks(tp); | 
 | 13393 |  | 
 | 13394 | 	/* Clear this out for sanity. */ | 
 | 13395 | 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); | 
 | 13396 |  | 
 | 13397 | 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 
 | 13398 | 			      &pci_state_reg); | 
 | 13399 | 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && | 
 | 13400 | 	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) { | 
 | 13401 | 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); | 
 | 13402 |  | 
 | 13403 | 		if (chiprevid == CHIPREV_ID_5701_A0 || | 
 | 13404 | 		    chiprevid == CHIPREV_ID_5701_B0 || | 
 | 13405 | 		    chiprevid == CHIPREV_ID_5701_B2 || | 
 | 13406 | 		    chiprevid == CHIPREV_ID_5701_B5) { | 
 | 13407 | 			void __iomem *sram_base; | 
 | 13408 |  | 
 | 13409 | 			/* Write some dummy words into the SRAM status block | 
 | 13410 | 			 * area and see if it reads back correctly.  If the return | 
 | 13411 | 			 * value is bad, force enable the PCIX workaround. | 
 | 13412 | 			 */ | 
 | 13413 | 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; | 
 | 13414 |  | 
 | 13415 | 			writel(0x00000000, sram_base); | 
 | 13416 | 			writel(0x00000000, sram_base + 4); | 
 | 13417 | 			writel(0xffffffff, sram_base + 4); | 
 | 13418 | 			if (readl(sram_base) != 0x00000000) | 
 | 13419 | 				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; | 
 | 13420 | 		} | 
 | 13421 | 	} | 
 | 13422 |  | 
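	/* Brief settle delay, then probe the NVRAM interface. */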
 | 13423 | 	udelay(50); | 
 | 13424 | 	tg3_nvram_init(tp); | 
 | 13425 |  | 
 | 13426 | 	grc_misc_cfg = tr32(GRC_MISC_CFG); | 
 | 13427 | 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; | 
 | 13428 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13429 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 
 | 13430 | 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || | 
 | 13431 | 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) | 
 | 13432 | 		tp->tg3_flags2 |= TG3_FLG2_IS_5788; | 
 | 13433 |  | 
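	/* Everything except the 5700 and 5788 boards can use the tagged
	 * status block method; enable the matching coalescing clear-tick
	 * modes when it is available.
	 */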
| David S. Miller | fac9b83 | 2005-05-18 22:46:34 -0700 | [diff] [blame] | 13434 | 	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) && | 
 | 13435 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)) | 
 | 13436 | 		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS; | 
 | 13437 | 	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { | 
 | 13438 | 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | | 
 | 13439 | 				      HOSTCC_MODE_CLRTICK_TXBD); | 
 | 13440 |  | 
 | 13441 | 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; | 
 | 13442 | 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, | 
 | 13443 | 				       tp->misc_host_ctrl); | 
 | 13444 | 	} | 
 | 13445 |  | 
| Matt Carlson | 3bda125 | 2008-08-15 14:08:22 -0700 | [diff] [blame] | 13446 | 	/* Preserve the APE MAC_MODE bits */ | 
 | 13447 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) | 
 | 13448 | 		tp->mac_mode = tr32(MAC_MODE) | | 
 | 13449 | 			       MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; | 
 | 13450 | 	else | 
 | 13451 | 		tp->mac_mode = TG3_DEF_MAC_MODE; | 
 | 13452 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13453 | 	/* these are limited to 10/100 only */ | 
 | 13454 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && | 
 | 13455 | 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || | 
 | 13456 | 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && | 
 | 13457 | 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && | 
 | 13458 | 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || | 
 | 13459 | 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || | 
 | 13460 | 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || | 
 | 13461 | 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && | 
 | 13462 | 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || | 
| Michael Chan | 676917d | 2006-12-07 00:20:22 -0800 | [diff] [blame] | 13463 | 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || | 
 | 13464 | 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 13465 | 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || | 
| Matt Carlson | 7f97a4b | 2009-08-25 10:10:03 +0000 | [diff] [blame] | 13466 | 	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13467 | 		tp->tg3_flags |= TG3_FLAG_10_100_ONLY; | 
 | 13468 |  | 
 | 13469 | 	err = tg3_phy_probe(tp); | 
 | 13470 | 	if (err) { | 
 | 13471 | 		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n", | 
 | 13472 | 		       pci_name(tp->pdev), err); | 
 | 13473 | 		/* ... but do not return immediately ... */ | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 13474 | 		tg3_mdio_fini(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13475 | 	} | 
 | 13476 |  | 
 | 13477 | 	tg3_read_partno(tp); | 
| Michael Chan | c4e6575 | 2006-03-20 22:29:32 -0800 | [diff] [blame] | 13478 | 	tg3_read_fw_ver(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13479 |  | 
 | 13480 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 
 | 13481 | 		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; | 
 | 13482 | 	} else { | 
 | 13483 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) | 
 | 13484 | 			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT; | 
 | 13485 | 		else | 
 | 13486 | 			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; | 
 | 13487 | 	} | 
 | 13488 |  | 
 | 13489 | 	/* 5700 {AX,BX} chips have a broken status block link | 
 | 13490 | 	 * change bit implementation, so we must use the | 
 | 13491 | 	 * status register in those cases. | 
 | 13492 | 	 */ | 
 | 13493 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) | 
 | 13494 | 		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG; | 
 | 13495 | 	else | 
 | 13496 | 		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG; | 
 | 13497 |  | 
 | 13498 | 	/* The led_ctrl is set during tg3_phy_probe, here we might | 
 | 13499 | 	 * have to force the link status polling mechanism based | 
 | 13500 | 	 * upon subsystem IDs. | 
 | 13501 | 	 */ | 
 | 13502 | 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && | 
| Michael Chan | 007a880d | 2007-05-31 14:49:51 -0700 | [diff] [blame] | 13503 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13504 | 	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { | 
 | 13505 | 		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | | 
 | 13506 | 				  TG3_FLAG_USE_LINKCHG_REG); | 
 | 13507 | 	} | 
 | 13508 |  | 
 | 13509 | 	/* For all SERDES we poll the MAC status register. */ | 
 | 13510 | 	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 
 | 13511 | 		tp->tg3_flags |= TG3_FLAG_POLL_SERDES; | 
 | 13512 | 	else | 
 | 13513 | 		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; | 
 | 13514 |  | 
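	/* Leave room to align the IP header in the RX buffer, except on
	 * the 5701 in PCI-X mode, which uses no offset.
	 */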
| Matt Carlson | ad82926 | 2008-11-21 17:16:16 -0800 | [diff] [blame] | 13515 | 	tp->rx_offset = NET_IP_ALIGN; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13516 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 
 | 13517 | 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) | 
 | 13518 | 		tp->rx_offset = 0; | 
 | 13519 |  | 
| Michael Chan | f92905d | 2006-06-29 20:14:29 -0700 | [diff] [blame] | 13520 | 	tp->rx_std_max_post = TG3_RX_RING_SIZE; | 
 | 13521 |  | 
 | 13522 | 	/* Increment the rx prod index on the rx std ring by at most | 
 | 13523 | 	 * 8 for these chips to work around hw errata. | 
 | 13524 | 	 */ | 
 | 13525 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 
 | 13526 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 
 | 13527 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 
 | 13528 | 		tp->rx_std_max_post = 8; | 
 | 13529 |  | 
| Matt Carlson | 8ed5d97 | 2007-05-07 00:25:49 -0700 | [diff] [blame] | 13530 | 	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) | 
 | 13531 | 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & | 
 | 13532 | 				     PCIE_PWR_MGMT_L1_THRESH_MSK; | 
 | 13533 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13534 | 	return err; | 
 | 13535 | } | 
 | 13536 |  | 
| David S. Miller | 49b6e95f | 2007-03-29 01:38:42 -0700 | [diff] [blame] | 13537 | #ifdef CONFIG_SPARC | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13538 | static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) | 
 | 13539 | { | 
 | 13540 | 	struct net_device *dev = tp->dev; | 
 | 13541 | 	struct pci_dev *pdev = tp->pdev; | 
| David S. Miller | 49b6e95f | 2007-03-29 01:38:42 -0700 | [diff] [blame] | 13542 | 	struct device_node *dp = pci_device_to_OF_node(pdev); | 
| David S. Miller | 374d4ca | 2007-03-29 01:57:57 -0700 | [diff] [blame] | 13543 | 	const unsigned char *addr; | 
| David S. Miller | 49b6e95f | 2007-03-29 01:38:42 -0700 | [diff] [blame] | 13544 | 	int len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13545 |  | 
| David S. Miller | 49b6e95f | 2007-03-29 01:38:42 -0700 | [diff] [blame] | 13546 | 	addr = of_get_property(dp, "local-mac-address", &len); | 
 | 13547 | 	if (addr && len == 6) { | 
 | 13548 | 		memcpy(dev->dev_addr, addr, 6); | 
 | 13549 | 		memcpy(dev->perm_addr, dev->dev_addr, 6); | 
 | 13550 | 		return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13551 | 	} | 
 | 13552 | 	return -ENODEV; | 
 | 13553 | } | 
 | 13554 |  | 
 | 13555 | static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) | 
 | 13556 | { | 
 | 13557 | 	struct net_device *dev = tp->dev; | 
 | 13558 |  | 
 | 13559 | 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | 
| John W. Linville | 2ff4369 | 2005-09-12 14:44:20 -0700 | [diff] [blame] | 13560 | 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13561 | 	return 0; | 
 | 13562 | } | 
 | 13563 | #endif | 
 | 13564 |  | 
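/* Obtain the hardware MAC address: try the platform (OF/IDPROM on SPARC)
 * first, then the SRAM mailbox, then NVRAM, and finally the MAC address
 * registers themselves.
 */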
 | 13565 | static int __devinit tg3_get_device_address(struct tg3 *tp) | 
 | 13566 | { | 
 | 13567 | 	struct net_device *dev = tp->dev; | 
 | 13568 | 	u32 hi, lo, mac_offset; | 
| Michael Chan | 008652b | 2006-03-27 23:14:53 -0800 | [diff] [blame] | 13569 | 	int addr_ok = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13570 |  | 
| David S. Miller | 49b6e95f | 2007-03-29 01:38:42 -0700 | [diff] [blame] | 13571 | #ifdef CONFIG_SPARC | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13572 | 	if (!tg3_get_macaddr_sparc(tp)) | 
 | 13573 | 		return 0; | 
 | 13574 | #endif | 
 | 13575 |  | 
 | 13576 | 	mac_offset = 0x7c; | 
| David S. Miller | f49639e | 2006-06-09 11:58:36 -0700 | [diff] [blame] | 13577 | 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 13578 | 	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13579 | 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) | 
 | 13580 | 			mac_offset = 0xcc; | 
 | 13581 | 		if (tg3_nvram_lock(tp)) | 
 | 13582 | 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); | 
 | 13583 | 		else | 
 | 13584 | 			tg3_nvram_unlock(tp); | 
| Matt Carlson | a1b950d | 2009-09-01 13:20:17 +0000 | [diff] [blame] | 13585 | 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 
 | 13586 | 		if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC) | 
 | 13587 | 			mac_offset = 0xcc; | 
 | 13588 | 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 13589 | 		mac_offset = 0x10; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13590 |  | 
 | 13591 | 	/* First try to get it from MAC address mailbox. */ | 
 | 13592 | 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); | 
 | 13593 | 	if ((hi >> 16) == 0x484b) { | 
 | 13594 | 		dev->dev_addr[0] = (hi >>  8) & 0xff; | 
 | 13595 | 		dev->dev_addr[1] = (hi >>  0) & 0xff; | 
 | 13596 |  | 
 | 13597 | 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); | 
 | 13598 | 		dev->dev_addr[2] = (lo >> 24) & 0xff; | 
 | 13599 | 		dev->dev_addr[3] = (lo >> 16) & 0xff; | 
 | 13600 | 		dev->dev_addr[4] = (lo >>  8) & 0xff; | 
 | 13601 | 		dev->dev_addr[5] = (lo >>  0) & 0xff; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13602 |  | 
| Michael Chan | 008652b | 2006-03-27 23:14:53 -0800 | [diff] [blame] | 13603 | 		/* Some old bootcode may report a 0 MAC address in SRAM */ | 
 | 13604 | 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); | 
 | 13605 | 	} | 
 | 13606 | 	if (!addr_ok) { | 
 | 13607 | 		/* Next, try NVRAM. */ | 
| Matt Carlson | df259d8 | 2009-04-20 06:57:14 +0000 | [diff] [blame] | 13608 | 		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) && | 
 | 13609 | 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && | 
| Matt Carlson | 6d348f2 | 2009-02-25 14:25:52 +0000 | [diff] [blame] | 13610 | 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { | 
| Matt Carlson | 62cedd1 | 2009-04-20 14:52:29 -0700 | [diff] [blame] | 13611 | 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); | 
 | 13612 | 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); | 
| Michael Chan | 008652b | 2006-03-27 23:14:53 -0800 | [diff] [blame] | 13613 | 		} | 
 | 13614 | 		/* Finally just fetch it out of the MAC control regs. */ | 
 | 13615 | 		else { | 
 | 13616 | 			hi = tr32(MAC_ADDR_0_HIGH); | 
 | 13617 | 			lo = tr32(MAC_ADDR_0_LOW); | 
 | 13618 |  | 
 | 13619 | 			dev->dev_addr[5] = lo & 0xff; | 
 | 13620 | 			dev->dev_addr[4] = (lo >> 8) & 0xff; | 
 | 13621 | 			dev->dev_addr[3] = (lo >> 16) & 0xff; | 
 | 13622 | 			dev->dev_addr[2] = (lo >> 24) & 0xff; | 
 | 13623 | 			dev->dev_addr[1] = hi & 0xff; | 
 | 13624 | 			dev->dev_addr[0] = (hi >> 8) & 0xff; | 
 | 13625 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13626 | 	} | 
 | 13627 |  | 
 | 13628 | 	if (!is_valid_ether_addr(&dev->dev_addr[0])) { | 
| David S. Miller | 7582a33 | 2008-03-20 15:53:15 -0700 | [diff] [blame] | 13629 | #ifdef CONFIG_SPARC | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13630 | 		if (!tg3_get_default_macaddr_sparc(tp)) | 
 | 13631 | 			return 0; | 
 | 13632 | #endif | 
 | 13633 | 		return -EINVAL; | 
 | 13634 | 	} | 
| John W. Linville | 2ff4369 | 2005-09-12 14:44:20 -0700 | [diff] [blame] | 13635 | 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13636 | 	return 0; | 
 | 13637 | } | 
 | 13638 |  | 
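/* Choose DMA read/write boundary bits for DMA_RWCTRL based on the bus type,
 * the host cache line size, and the architecture's preference for single- or
 * multi-cacheline bursts.
 */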
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13639 | #define BOUNDARY_SINGLE_CACHELINE	1 | 
 | 13640 | #define BOUNDARY_MULTI_CACHELINE	2 | 
 | 13641 |  | 
 | 13642 | static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | 
 | 13643 | { | 
 | 13644 | 	int cacheline_size; | 
 | 13645 | 	u8 byte; | 
 | 13646 | 	int goal; | 
 | 13647 |  | 
 | 13648 | 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); | 
 | 13649 | 	if (byte == 0) | 
 | 13650 | 		cacheline_size = 1024; | 
 | 13651 | 	else | 
 | 13652 | 		cacheline_size = (int) byte * 4; | 
 | 13653 |  | 
 | 13654 | 	/* On 5703 and later chips, the boundary bits have no | 
 | 13655 | 	 * effect. | 
 | 13656 | 	 */ | 
 | 13657 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 
 | 13658 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | 
 | 13659 | 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) | 
 | 13660 | 		goto out; | 
 | 13661 |  | 
 | 13662 | #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) | 
 | 13663 | 	goal = BOUNDARY_MULTI_CACHELINE; | 
 | 13664 | #else | 
 | 13665 | #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) | 
 | 13666 | 	goal = BOUNDARY_SINGLE_CACHELINE; | 
 | 13667 | #else | 
 | 13668 | 	goal = 0; | 
 | 13669 | #endif | 
 | 13670 | #endif | 
 | 13671 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13672 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13673 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | 
| Matt Carlson | cbf9ca6 | 2009-11-13 13:03:40 +0000 | [diff] [blame] | 13674 | 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | 
 | 13675 | 		goto out; | 
 | 13676 | 	} | 
 | 13677 |  | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13678 | 	if (!goal) | 
 | 13679 | 		goto out; | 
 | 13680 |  | 
 | 13681 | 	/* PCI controllers on most RISC systems tend to disconnect | 
 | 13682 | 	 * when a device tries to burst across a cache-line boundary. | 
 | 13683 | 	 * Therefore, letting tg3 do so just wastes PCI bandwidth. | 
 | 13684 | 	 * | 
 | 13685 | 	 * Unfortunately, for PCI-E there are only limited | 
 | 13686 | 	 * write-side controls for this, and thus for reads | 
 | 13687 | 	 * we will still get the disconnects.  We'll also waste | 
 | 13688 | 	 * these PCI cycles for both read and write for chips | 
 | 13689 | 	 * other than 5700 and 5701 which do not implement the | 
 | 13690 | 	 * boundary bits. | 
 | 13691 | 	 */ | 
 | 13692 | 	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) && | 
 | 13693 | 	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { | 
 | 13694 | 		switch (cacheline_size) { | 
 | 13695 | 		case 16: | 
 | 13696 | 		case 32: | 
 | 13697 | 		case 64: | 
 | 13698 | 		case 128: | 
 | 13699 | 			if (goal == BOUNDARY_SINGLE_CACHELINE) { | 
 | 13700 | 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | | 
 | 13701 | 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX); | 
 | 13702 | 			} else { | 
 | 13703 | 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | | 
 | 13704 | 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX); | 
 | 13705 | 			} | 
 | 13706 | 			break; | 
 | 13707 |  | 
 | 13708 | 		case 256: | 
 | 13709 | 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | | 
 | 13710 | 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX); | 
 | 13711 | 			break; | 
 | 13712 |  | 
 | 13713 | 		default: | 
 | 13714 | 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | | 
 | 13715 | 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX); | 
 | 13716 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 13717 | 		} | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13718 | 	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 
 | 13719 | 		switch (cacheline_size) { | 
 | 13720 | 		case 16: | 
 | 13721 | 		case 32: | 
 | 13722 | 		case 64: | 
 | 13723 | 			if (goal == BOUNDARY_SINGLE_CACHELINE) { | 
 | 13724 | 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; | 
 | 13725 | 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; | 
 | 13726 | 				break; | 
 | 13727 | 			} | 
 | 13728 | 			/* fallthrough */ | 
 | 13729 | 		case 128: | 
 | 13730 | 		default: | 
 | 13731 | 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; | 
 | 13732 | 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; | 
 | 13733 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 13734 | 		} | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13735 | 	} else { | 
 | 13736 | 		switch (cacheline_size) { | 
 | 13737 | 		case 16: | 
 | 13738 | 			if (goal == BOUNDARY_SINGLE_CACHELINE) { | 
 | 13739 | 				val |= (DMA_RWCTRL_READ_BNDRY_16 | | 
 | 13740 | 					DMA_RWCTRL_WRITE_BNDRY_16); | 
 | 13741 | 				break; | 
 | 13742 | 			} | 
 | 13743 | 			/* fallthrough */ | 
 | 13744 | 		case 32: | 
 | 13745 | 			if (goal == BOUNDARY_SINGLE_CACHELINE) { | 
 | 13746 | 				val |= (DMA_RWCTRL_READ_BNDRY_32 | | 
 | 13747 | 					DMA_RWCTRL_WRITE_BNDRY_32); | 
 | 13748 | 				break; | 
 | 13749 | 			} | 
 | 13750 | 			/* fallthrough */ | 
 | 13751 | 		case 64: | 
 | 13752 | 			if (goal == BOUNDARY_SINGLE_CACHELINE) { | 
 | 13753 | 				val |= (DMA_RWCTRL_READ_BNDRY_64 | | 
 | 13754 | 					DMA_RWCTRL_WRITE_BNDRY_64); | 
 | 13755 | 				break; | 
 | 13756 | 			} | 
 | 13757 | 			/* fallthrough */ | 
 | 13758 | 		case 128: | 
 | 13759 | 			if (goal == BOUNDARY_SINGLE_CACHELINE) { | 
 | 13760 | 				val |= (DMA_RWCTRL_READ_BNDRY_128 | | 
 | 13761 | 					DMA_RWCTRL_WRITE_BNDRY_128); | 
 | 13762 | 				break; | 
 | 13763 | 			} | 
 | 13764 | 			/* fallthrough */ | 
 | 13765 | 		case 256: | 
 | 13766 | 			val |= (DMA_RWCTRL_READ_BNDRY_256 | | 
 | 13767 | 				DMA_RWCTRL_WRITE_BNDRY_256); | 
 | 13768 | 			break; | 
 | 13769 | 		case 512: | 
 | 13770 | 			val |= (DMA_RWCTRL_READ_BNDRY_512 | | 
 | 13771 | 				DMA_RWCTRL_WRITE_BNDRY_512); | 
 | 13772 | 			break; | 
 | 13773 | 		case 1024: | 
 | 13774 | 		default: | 
 | 13775 | 			val |= (DMA_RWCTRL_READ_BNDRY_1024 | | 
 | 13776 | 				DMA_RWCTRL_WRITE_BNDRY_1024); | 
 | 13777 | 			break; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 13778 | 		} | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13779 | 	} | 
 | 13780 |  | 
 | 13781 | out: | 
 | 13782 | 	return val; | 
 | 13783 | } | 
 | 13784 |  | 
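/* Run one test DMA transaction: build an internal buffer descriptor, write it
 * into NIC SRAM through the PCI memory window registers, kick the read or
 * write DMA queue, and poll the completion FIFO for the result.
 */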
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13785 | static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) | 
 | 13786 | { | 
 | 13787 | 	struct tg3_internal_buffer_desc test_desc; | 
 | 13788 | 	u32 sram_dma_descs; | 
 | 13789 | 	int i, ret; | 
 | 13790 |  | 
 | 13791 | 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; | 
 | 13792 |  | 
 | 13793 | 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); | 
 | 13794 | 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); | 
 | 13795 | 	tw32(RDMAC_STATUS, 0); | 
 | 13796 | 	tw32(WDMAC_STATUS, 0); | 
 | 13797 |  | 
 | 13798 | 	tw32(BUFMGR_MODE, 0); | 
 | 13799 | 	tw32(FTQ_RESET, 0); | 
 | 13800 |  | 
 | 13801 | 	test_desc.addr_hi = ((u64) buf_dma) >> 32; | 
 | 13802 | 	test_desc.addr_lo = buf_dma & 0xffffffff; | 
 | 13803 | 	test_desc.nic_mbuf = 0x00002100; | 
 | 13804 | 	test_desc.len = size; | 
 | 13805 |  | 
 | 13806 | 	/* | 
 | 13807 | 	 * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz | 
 | 13808 | 	 * the *second* time the tg3 driver was getting loaded after an | 
 | 13809 | 	 * initial scan. | 
 | 13810 | 	 * | 
 | 13811 | 	 * Broadcom tells me: | 
 | 13812 | 	 *   ...the DMA engine is connected to the GRC block and a DMA | 
 | 13813 | 	 *   reset may affect the GRC block in some unpredictable way... | 
 | 13814 | 	 *   The behavior of resets to individual blocks has not been tested. | 
 | 13815 | 	 * | 
 | 13816 | 	 * Broadcom noted the GRC reset will also reset all sub-components. | 
 | 13817 | 	 */ | 
 | 13818 | 	if (to_device) { | 
 | 13819 | 		test_desc.cqid_sqid = (13 << 8) | 2; | 
 | 13820 |  | 
 | 13821 | 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); | 
 | 13822 | 		udelay(40); | 
 | 13823 | 	} else { | 
 | 13824 | 		test_desc.cqid_sqid = (16 << 8) | 7; | 
 | 13825 |  | 
 | 13826 | 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); | 
 | 13827 | 		udelay(40); | 
 | 13828 | 	} | 
 | 13829 | 	test_desc.flags = 0x00000005; | 
 | 13830 |  | 
 | 13831 | 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { | 
 | 13832 | 		u32 val; | 
 | 13833 |  | 
 | 13834 | 		val = *(((u32 *)&test_desc) + i); | 
 | 13835 | 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, | 
 | 13836 | 				       sram_dma_descs + (i * sizeof(u32))); | 
 | 13837 | 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); | 
 | 13838 | 	} | 
 | 13839 | 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); | 
 | 13840 |  | 
 | 13841 | 	if (to_device) { | 
 | 13842 | 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); | 
 | 13843 | 	} else { | 
 | 13844 | 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); | 
 | 13845 | 	} | 
 | 13846 |  | 
 | 13847 | 	ret = -ENODEV; | 
 | 13848 | 	for (i = 0; i < 40; i++) { | 
 | 13849 | 		u32 val; | 
 | 13850 |  | 
 | 13851 | 		if (to_device) | 
 | 13852 | 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); | 
 | 13853 | 		else | 
 | 13854 | 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); | 
 | 13855 | 		if ((val & 0xffff) == sram_dma_descs) { | 
 | 13856 | 			ret = 0; | 
 | 13857 | 			break; | 
 | 13858 | 		} | 
 | 13859 |  | 
 | 13860 | 		udelay(100); | 
 | 13861 | 	} | 
 | 13862 |  | 
 | 13863 | 	return ret; | 
 | 13864 | } | 
 | 13865 |  | 
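/* Size the DMA control register for the detected bus, then (on 5700/5701
 * only) run a write/read DMA test against a host buffer to detect boards
 * that need the 16-byte write boundary workaround.
 */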
| David S. Miller | ded7340 | 2005-05-23 13:59:47 -0700 | [diff] [blame] | 13866 | #define TEST_BUFFER_SIZE	0x2000 | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13867 |  | 
 | 13868 | static int __devinit tg3_test_dma(struct tg3 *tp) | 
 | 13869 | { | 
 | 13870 | 	dma_addr_t buf_dma; | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13871 | 	u32 *buf, saved_dma_rwctrl; | 
| Matt Carlson | cbf9ca6 | 2009-11-13 13:03:40 +0000 | [diff] [blame] | 13872 | 	int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13873 |  | 
 | 13874 | 	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); | 
 | 13875 | 	if (!buf) { | 
 | 13876 | 		ret = -ENOMEM; | 
 | 13877 | 		goto out_nofree; | 
 | 13878 | 	} | 
 | 13879 |  | 
 | 13880 | 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | | 
 | 13881 | 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); | 
 | 13882 |  | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13883 | 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13884 |  | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 13885 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | 
 | 13886 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | 
| Matt Carlson | cbf9ca6 | 2009-11-13 13:03:40 +0000 | [diff] [blame] | 13887 | 		goto out; | 
 | 13888 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13889 | 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 
 | 13890 | 		/* DMA read watermark not used on PCIE */ | 
 | 13891 | 		tp->dma_rwctrl |= 0x00180000; | 
 | 13892 | 	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 13893 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || | 
 | 13894 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13895 | 			tp->dma_rwctrl |= 0x003f0000; | 
 | 13896 | 		else | 
 | 13897 | 			tp->dma_rwctrl |= 0x003f000f; | 
 | 13898 | 	} else { | 
 | 13899 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 
 | 13900 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { | 
 | 13901 | 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); | 
| Michael Chan | 49afdeb | 2007-02-13 12:17:03 -0800 | [diff] [blame] | 13902 | 			u32 read_water = 0x7; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13903 |  | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 13904 | 			/* If the 5704 is behind the EPB bridge, we can | 
 | 13905 | 			 * do the less restrictive ONE_DMA workaround for | 
 | 13906 | 			 * better performance. | 
 | 13907 | 			 */ | 
 | 13908 | 			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) && | 
 | 13909 | 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 
 | 13910 | 				tp->dma_rwctrl |= 0x8000; | 
 | 13911 | 			else if (ccval == 0x6 || ccval == 0x7) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13912 | 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; | 
 | 13913 |  | 
| Michael Chan | 49afdeb | 2007-02-13 12:17:03 -0800 | [diff] [blame] | 13914 | 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) | 
 | 13915 | 				read_water = 4; | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13916 | 			/* Set bit 23 to enable PCIX hw bug fix */ | 
| Michael Chan | 49afdeb | 2007-02-13 12:17:03 -0800 | [diff] [blame] | 13917 | 			tp->dma_rwctrl |= | 
 | 13918 | 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) | | 
 | 13919 | 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | | 
 | 13920 | 				(1 << 23); | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 13921 | 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { | 
 | 13922 | 			/* 5780 always in PCIX mode */ | 
 | 13923 | 			tp->dma_rwctrl |= 0x00144000; | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 13924 | 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | 
 | 13925 | 			/* 5714 always in PCIX mode */ | 
 | 13926 | 			tp->dma_rwctrl |= 0x00148000; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13927 | 		} else { | 
 | 13928 | 			tp->dma_rwctrl |= 0x001b000f; | 
 | 13929 | 		} | 
 | 13930 | 	} | 
 | 13931 |  | 
 | 13932 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || | 
 | 13933 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) | 
 | 13934 | 		tp->dma_rwctrl &= 0xfffffff0; | 
 | 13935 |  | 
 | 13936 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 
 | 13937 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { | 
 | 13938 | 		/* Remove this if it causes problems for some boards. */ | 
 | 13939 | 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; | 
 | 13940 |  | 
 | 13941 | 		/* On 5700/5701 chips, we need to set this bit. | 
 | 13942 | 		 * Otherwise the chip will issue cacheline transactions | 
 | 13943 | 		 * to streamable DMA memory without all of the byte | 
 | 13944 | 		 * enables turned on.  This is an error on several | 
 | 13945 | 		 * RISC PCI controllers, in particular sparc64. | 
 | 13946 | 		 * | 
 | 13947 | 		 * On 5703/5704 chips, this bit has been reassigned | 
 | 13948 | 		 * a different meaning.  In particular, it is used | 
 | 13949 | 		 * on those chips to enable a PCI-X workaround. | 
 | 13950 | 		 */ | 
 | 13951 | 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; | 
 | 13952 | 	} | 
 | 13953 |  | 
 | 13954 | 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 
 | 13955 |  | 
 | 13956 | #if 0 | 
 | 13957 | 	/* Unneeded, already done by tg3_get_invariants.  */ | 
 | 13958 | 	tg3_switch_clocks(tp); | 
 | 13959 | #endif | 
 | 13960 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13961 | 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 
 | 13962 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 
 | 13963 | 		goto out; | 
 | 13964 |  | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 13965 | 	/* It is best to perform the DMA test with the maximum write burst size | 
 | 13966 | 	 * to expose the 5700/5701 write DMA bug. | 
 | 13967 | 	 */ | 
 | 13968 | 	saved_dma_rwctrl = tp->dma_rwctrl; | 
 | 13969 | 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 
 | 13970 | 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 
 | 13971 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13972 | 	while (1) { | 
 | 13973 | 		u32 *p = buf, i; | 
 | 13974 |  | 
 | 13975 | 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) | 
 | 13976 | 			p[i] = i; | 
 | 13977 |  | 
 | 13978 | 		/* Send the buffer to the chip. */ | 
 | 13979 | 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); | 
 | 13980 | 		if (ret) { | 
 | 13981 | 			printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err = %d\n", ret); | 
 | 13982 | 			break; | 
 | 13983 | 		} | 
 | 13984 |  | 
 | 13985 | #if 0 | 
 | 13986 | 		/* Validate that the data reached card RAM correctly. */ | 
 | 13987 | 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { | 
 | 13988 | 			u32 val; | 
 | 13989 | 			tg3_read_mem(tp, 0x2100 + (i*4), &val); | 
 | 13990 | 			if (le32_to_cpu(val) != p[i]) { | 
 | 13991 | 				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%u != %u)\n", le32_to_cpu(val), i); | 
 | 13992 | 				/* ret = -ENODEV here? */ | 
 | 13993 | 			} | 
 | 13994 | 			p[i] = 0; | 
 | 13995 | 		} | 
 | 13996 | #endif | 
 | 13997 | 		/* Now read it back. */ | 
 | 13998 | 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); | 
 | 13999 | 		if (ret) { | 
 | 14000 | 			printk(KERN_ERR "tg3_test_dma() read of test buffer failed, err = %d\n", ret); | 
 | 14001 |  | 
 | 14002 | 			break; | 
 | 14003 | 		} | 
 | 14004 |  | 
 | 14005 | 		/* Verify it. */ | 
 | 14006 | 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { | 
 | 14007 | 			if (p[i] == i) | 
 | 14008 | 				continue; | 
 | 14009 |  | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 14010 | 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != | 
 | 14011 | 			    DMA_RWCTRL_WRITE_BNDRY_16) { | 
 | 14012 | 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14013 | 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; | 
 | 14014 | 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 
 | 14015 | 				break; | 
 | 14016 | 			} else { | 
 | 14017 | 				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%u != %u)\n", p[i], i); | 
 | 14018 | 				ret = -ENODEV; | 
 | 14019 | 				goto out; | 
 | 14020 | 			} | 
 | 14021 | 		} | 
 | 14022 |  | 
 | 14023 | 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { | 
 | 14024 | 			/* Success. */ | 
 | 14025 | 			ret = 0; | 
 | 14026 | 			break; | 
 | 14027 | 		} | 
 | 14028 | 	} | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 14029 | 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != | 
 | 14030 | 	    DMA_RWCTRL_WRITE_BNDRY_16) { | 
| Michael Chan | 6d1cfba | 2005-06-08 14:13:14 -0700 | [diff] [blame] | 14031 | 		static const struct pci_device_id dma_wait_state_chipsets[] = { | 
 | 14032 | 			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, | 
 | 14033 | 				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, | 
 | 14034 | 			{ }, | 
 | 14035 | 		}; | 
 | 14036 |  | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 14037 | 		/* The DMA test passed without adjusting the DMA boundary; | 
| Michael Chan | 6d1cfba | 2005-06-08 14:13:14 -0700 | [diff] [blame] | 14038 | 		 * now look for chipsets that are known to expose the | 
 | 14039 | 		 * DMA bug without failing the test. | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 14040 | 		 */ | 
| Michael Chan | 6d1cfba | 2005-06-08 14:13:14 -0700 | [diff] [blame] | 14041 | 		if (pci_dev_present(dma_wait_state_chipsets)) { | 
 | 14042 | 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; | 
 | 14043 | 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; | 
 | 14044 | 		} else { | 
 | 14045 | 			/* Safe to use the calculated DMA boundary. */ | 
 | 14046 | 			tp->dma_rwctrl = saved_dma_rwctrl; | 
 | 14047 | 		} | 
 | 14048 |  | 
| David S. Miller | 59e6b43 | 2005-05-18 22:50:10 -0700 | [diff] [blame] | 14049 | 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); | 
 | 14050 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14051 |  | 
 | 14052 | out: | 
 | 14053 | 	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); | 
 | 14054 | out_nofree: | 
 | 14055 | 	return ret; | 
 | 14056 | } | 
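
/* A condensed sketch of the DMA exercise performed above, kept under an
 * "#if 0" guard like the other optional blocks in this file.  It is an
 * illustration only: it assumes the same TEST_BUFFER_SIZE buffer that
 * tg3_test_dma() allocates and reuses the existing tg3_do_test_dma()
 * helper; the function name tg3_dma_pattern_check() is hypothetical.
 */
#if 0
static int tg3_dma_pattern_check(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma)
{
	u32 i;
	int ret;

	/* Fill the host buffer with a known, position-dependent pattern. */
	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
		buf[i] = i;

	/* DMA the buffer to the chip (last argument 1, as in the loop above)... */
	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
	if (ret)
		return ret;

	/* ...then DMA it back into host memory (last argument 0). */
	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
	if (ret)
		return ret;

	/* Any mismatch means a transfer corrupted the data in flight. */
	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
		if (buf[i] != i)
			return -ENODEV;

	return 0;
}
#endif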
 | 14057 |  | 
 | 14058 | static void __devinit tg3_init_link_config(struct tg3 *tp) | 
 | 14059 | { | 
 | 14060 | 	tp->link_config.advertising = | 
 | 14061 | 		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | 
 | 14062 | 		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | 
 | 14063 | 		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | | 
 | 14064 | 		 ADVERTISED_Autoneg | ADVERTISED_MII); | 
 | 14065 | 	tp->link_config.speed = SPEED_INVALID; | 
 | 14066 | 	tp->link_config.duplex = DUPLEX_INVALID; | 
 | 14067 | 	tp->link_config.autoneg = AUTONEG_ENABLE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14068 | 	tp->link_config.active_speed = SPEED_INVALID; | 
 | 14069 | 	tp->link_config.active_duplex = DUPLEX_INVALID; | 
 | 14070 | 	tp->link_config.phy_is_low_power = 0; | 
 | 14071 | 	tp->link_config.orig_speed = SPEED_INVALID; | 
 | 14072 | 	tp->link_config.orig_duplex = DUPLEX_INVALID; | 
 | 14073 | 	tp->link_config.orig_autoneg = AUTONEG_INVALID; | 
 | 14074 | } | 
 | 14075 |  | 
 | 14076 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | 
 | 14077 | { | 
| Matt Carlson | f6eb9b1 | 2009-09-01 13:19:53 +0000 | [diff] [blame] | 14078 | 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && | 
| Matt Carlson | b703df6 | 2009-12-03 08:36:21 +0000 | [diff] [blame] | 14079 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 
 | 14080 | 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | 
| Michael Chan | fdfec17 | 2005-07-25 12:31:48 -0700 | [diff] [blame] | 14081 | 		tp->bufmgr_config.mbuf_read_dma_low_water = | 
 | 14082 | 			DEFAULT_MB_RDMA_LOW_WATER_5705; | 
 | 14083 | 		tp->bufmgr_config.mbuf_mac_rx_low_water = | 
 | 14084 | 			DEFAULT_MB_MACRX_LOW_WATER_5705; | 
 | 14085 | 		tp->bufmgr_config.mbuf_high_water = | 
 | 14086 | 			DEFAULT_MB_HIGH_WATER_5705; | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 14087 | 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 
 | 14088 | 			tp->bufmgr_config.mbuf_mac_rx_low_water = | 
 | 14089 | 				DEFAULT_MB_MACRX_LOW_WATER_5906; | 
 | 14090 | 			tp->bufmgr_config.mbuf_high_water = | 
 | 14091 | 				DEFAULT_MB_HIGH_WATER_5906; | 
 | 14092 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14093 |  | 
| Michael Chan | fdfec17 | 2005-07-25 12:31:48 -0700 | [diff] [blame] | 14094 | 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = | 
 | 14095 | 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; | 
 | 14096 | 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = | 
 | 14097 | 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; | 
 | 14098 | 		tp->bufmgr_config.mbuf_high_water_jumbo = | 
 | 14099 | 			DEFAULT_MB_HIGH_WATER_JUMBO_5780; | 
 | 14100 | 	} else { | 
 | 14101 | 		tp->bufmgr_config.mbuf_read_dma_low_water = | 
 | 14102 | 			DEFAULT_MB_RDMA_LOW_WATER; | 
 | 14103 | 		tp->bufmgr_config.mbuf_mac_rx_low_water = | 
 | 14104 | 			DEFAULT_MB_MACRX_LOW_WATER; | 
 | 14105 | 		tp->bufmgr_config.mbuf_high_water = | 
 | 14106 | 			DEFAULT_MB_HIGH_WATER; | 
 | 14107 |  | 
 | 14108 | 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = | 
 | 14109 | 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO; | 
 | 14110 | 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = | 
 | 14111 | 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO; | 
 | 14112 | 		tp->bufmgr_config.mbuf_high_water_jumbo = | 
 | 14113 | 			DEFAULT_MB_HIGH_WATER_JUMBO; | 
 | 14114 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14115 |  | 
 | 14116 | 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; | 
 | 14117 | 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; | 
 | 14118 | } | 
 | 14119 |  | 
 | 14120 | static char * __devinit tg3_phy_string(struct tg3 *tp) | 
 | 14121 | { | 
 | 14122 | 	switch (tp->phy_id & PHY_ID_MASK) { | 
 | 14123 | 	case PHY_ID_BCM5400:	return "5400"; | 
 | 14124 | 	case PHY_ID_BCM5401:	return "5401"; | 
 | 14125 | 	case PHY_ID_BCM5411:	return "5411"; | 
 | 14126 | 	case PHY_ID_BCM5701:	return "5701"; | 
 | 14127 | 	case PHY_ID_BCM5703:	return "5703"; | 
 | 14128 | 	case PHY_ID_BCM5704:	return "5704"; | 
 | 14129 | 	case PHY_ID_BCM5705:	return "5705"; | 
 | 14130 | 	case PHY_ID_BCM5750:	return "5750"; | 
| Michael Chan | 85e94ce | 2005-04-21 17:05:28 -0700 | [diff] [blame] | 14131 | 	case PHY_ID_BCM5752:	return "5752"; | 
| Michael Chan | a4e2b34 | 2005-10-26 15:46:52 -0700 | [diff] [blame] | 14132 | 	case PHY_ID_BCM5714:	return "5714"; | 
| Michael Chan | 4cf78e4 | 2005-07-25 12:29:19 -0700 | [diff] [blame] | 14133 | 	case PHY_ID_BCM5780:	return "5780"; | 
| Michael Chan | af36e6b | 2006-03-23 01:28:06 -0800 | [diff] [blame] | 14134 | 	case PHY_ID_BCM5755:	return "5755"; | 
| Michael Chan | d9ab5ad | 2006-03-20 22:27:35 -0800 | [diff] [blame] | 14135 | 	case PHY_ID_BCM5787:	return "5787"; | 
| Matt Carlson | d30cdd2 | 2007-10-07 23:28:35 -0700 | [diff] [blame] | 14136 | 	case PHY_ID_BCM5784:	return "5784"; | 
| Michael Chan | 126a336 | 2006-09-27 16:03:07 -0700 | [diff] [blame] | 14137 | 	case PHY_ID_BCM5756:	return "5722/5756"; | 
| Michael Chan | b5d3772 | 2006-09-27 16:06:21 -0700 | [diff] [blame] | 14138 | 	case PHY_ID_BCM5906:	return "5906"; | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 14139 | 	case PHY_ID_BCM5761:	return "5761"; | 
| Matt Carlson | c2060fe | 2009-11-13 13:03:33 +0000 | [diff] [blame] | 14140 | 	case PHY_ID_BCM5717:	return "5717"; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14141 | 	case PHY_ID_BCM8002:	return "8002/serdes"; | 
 | 14142 | 	case 0:			return "serdes"; | 
 | 14143 | 	default:		return "unknown"; | 
| Stephen Hemminger | 855e111 | 2008-04-16 16:37:28 -0700 | [diff] [blame] | 14144 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14145 | } | 
 | 14146 |  | 
| Michael Chan | f9804dd | 2005-09-27 12:13:10 -0700 | [diff] [blame] | 14147 | static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) | 
 | 14148 | { | 
 | 14149 | 	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 
 | 14150 | 		strcpy(str, "PCI Express"); | 
 | 14151 | 		return str; | 
 | 14152 | 	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { | 
 | 14153 | 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; | 
 | 14154 |  | 
 | 14155 | 		strcpy(str, "PCIX:"); | 
 | 14156 |  | 
 | 14157 | 		if ((clock_ctrl == 7) || | 
 | 14158 | 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == | 
 | 14159 | 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE)) | 
 | 14160 | 			strcat(str, "133MHz"); | 
 | 14161 | 		else if (clock_ctrl == 0) | 
 | 14162 | 			strcat(str, "33MHz"); | 
 | 14163 | 		else if (clock_ctrl == 2) | 
 | 14164 | 			strcat(str, "50MHz"); | 
 | 14165 | 		else if (clock_ctrl == 4) | 
 | 14166 | 			strcat(str, "66MHz"); | 
 | 14167 | 		else if (clock_ctrl == 6) | 
 | 14168 | 			strcat(str, "100MHz"); | 
| Michael Chan | f9804dd | 2005-09-27 12:13:10 -0700 | [diff] [blame] | 14169 | 	} else { | 
 | 14170 | 		strcpy(str, "PCI:"); | 
 | 14171 | 		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) | 
 | 14172 | 			strcat(str, "66MHz"); | 
 | 14173 | 		else | 
 | 14174 | 			strcat(str, "33MHz"); | 
 | 14175 | 	} | 
 | 14176 | 	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) | 
 | 14177 | 		strcat(str, ":32-bit"); | 
 | 14178 | 	else | 
 | 14179 | 		strcat(str, ":64-bit"); | 
 | 14180 | 	return str; | 
 | 14181 | } | 
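
/* Summary of the PCI-X clock decode above, derived purely from the code:
 * CLOCK_CTRL values 0, 2, 4 and 6 are reported as 33, 50, 66 and 100 MHz,
 * while 7 (or the 5704CIOBE board ID) is reported as 133 MHz; any other
 * value leaves the string at just "PCIX:".
 */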
 | 14182 |  | 
| Michael Chan | 8c2dc7e | 2005-12-19 16:26:02 -0800 | [diff] [blame] | 14183 | static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14184 | { | 
 | 14185 | 	struct pci_dev *peer; | 
 | 14186 | 	unsigned int func, devnr = tp->pdev->devfn & ~7; | 
 | 14187 |  | 
 | 14188 | 	for (func = 0; func < 8; func++) { | 
 | 14189 | 		peer = pci_get_slot(tp->pdev->bus, devnr | func); | 
 | 14190 | 		if (peer && peer != tp->pdev) | 
 | 14191 | 			break; | 
 | 14192 | 		pci_dev_put(peer); | 
 | 14193 | 	} | 
| Michael Chan | 16fe9d7 | 2005-12-13 21:09:54 -0800 | [diff] [blame] | 14194 | 	/* The 5704 can be configured in single-port mode; set peer to | 
 | 14195 | 	 * tp->pdev in that case. | 
 | 14196 | 	 */ | 
 | 14197 | 	if (!peer) { | 
 | 14198 | 		peer = tp->pdev; | 
 | 14199 | 		return peer; | 
 | 14200 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14201 |  | 
 | 14202 | 	/* | 
 | 14203 | 	 * We don't need to keep the refcount elevated; there's no way | 
 | 14204 | 	 * to remove one half of this device without removing the other. | 
 | 14205 | 	 */ | 
 | 14206 | 	pci_dev_put(peer); | 
 | 14207 |  | 
 | 14208 | 	return peer; | 
 | 14209 | } | 
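
/* Illustration of the devfn arithmetic used above: "devfn & ~7" clears the
 * three function-number bits, so the loop probes every function that shares
 * tp->pdev's slot.  The guarded helper below expresses the same idea with
 * the standard PCI_SLOT()/PCI_FUNC() macros; it is a sketch only and the
 * name tg3_same_slot() is hypothetical.
 */
#if 0
static bool tg3_same_slot(struct pci_dev *a, struct pci_dev *b)
{
	/* PCI_SLOT() is devfn bits 7:3, PCI_FUNC() is bits 2:0, so the
	 * "devfn & ~7" above is simply PCI_DEVFN(PCI_SLOT(devfn), 0).
	 */
	return a->bus == b->bus && PCI_SLOT(a->devfn) == PCI_SLOT(b->devfn);
}
#endif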
 | 14210 |  | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 14211 | static void __devinit tg3_init_coal(struct tg3 *tp) | 
 | 14212 | { | 
 | 14213 | 	struct ethtool_coalesce *ec = &tp->coal; | 
 | 14214 |  | 
 | 14215 | 	memset(ec, 0, sizeof(*ec)); | 
 | 14216 | 	ec->cmd = ETHTOOL_GCOALESCE; | 
 | 14217 | 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; | 
 | 14218 | 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; | 
 | 14219 | 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; | 
 | 14220 | 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; | 
 | 14221 | 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; | 
 | 14222 | 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; | 
 | 14223 | 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; | 
 | 14224 | 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; | 
 | 14225 | 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; | 
 | 14226 |  | 
 | 14227 | 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | | 
 | 14228 | 				 HOSTCC_MODE_CLRTICK_TXBD)) { | 
 | 14229 | 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; | 
 | 14230 | 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; | 
 | 14231 | 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; | 
 | 14232 | 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; | 
 | 14233 | 	} | 
| Michael Chan | d244c89 | 2005-07-05 14:42:33 -0700 | [diff] [blame] | 14234 |  | 
 | 14235 | 	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { | 
 | 14236 | 		ec->rx_coalesce_usecs_irq = 0; | 
 | 14237 | 		ec->tx_coalesce_usecs_irq = 0; | 
 | 14238 | 		ec->stats_block_coalesce_usecs = 0; | 
 | 14239 | 	} | 
| David S. Miller | 15f9850 | 2005-05-18 22:49:26 -0700 | [diff] [blame] | 14240 | } | 
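
/* Usage note, assuming the standard ethtool utility and an interface named
 * eth0 (both illustrative): once the device is registered, the defaults
 * installed above are what "ethtool -c eth0" reports, and they can be tuned
 * at run time with e.g.
 *
 *	ethtool -C eth0 rx-usecs 40 rx-frames 8
 *
 * subject to whatever limits the driver's ethtool coalesce handler enforces.
 */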
 | 14241 |  | 
| Stephen Hemminger | 7c7d64b | 2008-11-19 22:25:36 -0800 | [diff] [blame] | 14242 | static const struct net_device_ops tg3_netdev_ops = { | 
 | 14243 | 	.ndo_open		= tg3_open, | 
 | 14244 | 	.ndo_stop		= tg3_close, | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 14245 | 	.ndo_start_xmit		= tg3_start_xmit, | 
 | 14246 | 	.ndo_get_stats		= tg3_get_stats, | 
 | 14247 | 	.ndo_validate_addr	= eth_validate_addr, | 
 | 14248 | 	.ndo_set_multicast_list	= tg3_set_rx_mode, | 
 | 14249 | 	.ndo_set_mac_address	= tg3_set_mac_addr, | 
 | 14250 | 	.ndo_do_ioctl		= tg3_ioctl, | 
 | 14251 | 	.ndo_tx_timeout		= tg3_tx_timeout, | 
 | 14252 | 	.ndo_change_mtu		= tg3_change_mtu, | 
 | 14253 | #if TG3_VLAN_TAG_USED | 
 | 14254 | 	.ndo_vlan_rx_register	= tg3_vlan_rx_register, | 
 | 14255 | #endif | 
 | 14256 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
 | 14257 | 	.ndo_poll_controller	= tg3_poll_controller, | 
 | 14258 | #endif | 
 | 14259 | }; | 
 | 14260 |  | 
 | 14261 | static const struct net_device_ops tg3_netdev_ops_dma_bug = { | 
 | 14262 | 	.ndo_open		= tg3_open, | 
 | 14263 | 	.ndo_stop		= tg3_close, | 
 | 14264 | 	.ndo_start_xmit		= tg3_start_xmit_dma_bug, | 
| Stephen Hemminger | 7c7d64b | 2008-11-19 22:25:36 -0800 | [diff] [blame] | 14265 | 	.ndo_get_stats		= tg3_get_stats, | 
 | 14266 | 	.ndo_validate_addr	= eth_validate_addr, | 
 | 14267 | 	.ndo_set_multicast_list	= tg3_set_rx_mode, | 
 | 14268 | 	.ndo_set_mac_address	= tg3_set_mac_addr, | 
 | 14269 | 	.ndo_do_ioctl		= tg3_ioctl, | 
 | 14270 | 	.ndo_tx_timeout		= tg3_tx_timeout, | 
 | 14271 | 	.ndo_change_mtu		= tg3_change_mtu, | 
 | 14272 | #if TG3_VLAN_TAG_USED | 
 | 14273 | 	.ndo_vlan_rx_register	= tg3_vlan_rx_register, | 
 | 14274 | #endif | 
 | 14275 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
 | 14276 | 	.ndo_poll_controller	= tg3_poll_controller, | 
 | 14277 | #endif | 
 | 14278 | }; | 
 | 14279 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14280 | static int __devinit tg3_init_one(struct pci_dev *pdev, | 
 | 14281 | 				  const struct pci_device_id *ent) | 
 | 14282 | { | 
 | 14283 | 	static int tg3_version_printed; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14284 | 	struct net_device *dev; | 
 | 14285 | 	struct tg3 *tp; | 
| Matt Carlson | 646c9ed | 2009-09-01 12:58:41 +0000 | [diff] [blame] | 14286 | 	int i, err, pm_cap; | 
 | 14287 | 	u32 sndmbx, rcvmbx, intmbx; | 
| Michael Chan | f9804dd | 2005-09-27 12:13:10 -0700 | [diff] [blame] | 14288 | 	char str[40]; | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14289 | 	u64 dma_mask, persist_dma_mask; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14290 |  | 
 | 14291 | 	if (tg3_version_printed++ == 0) | 
 | 14292 | 		printk(KERN_INFO "%s", version); | 
 | 14293 |  | 
 | 14294 | 	err = pci_enable_device(pdev); | 
 | 14295 | 	if (err) { | 
 | 14296 | 		printk(KERN_ERR PFX "Cannot enable PCI device, " | 
 | 14297 | 		       "aborting.\n"); | 
 | 14298 | 		return err; | 
 | 14299 | 	} | 
 | 14300 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14301 | 	err = pci_request_regions(pdev, DRV_MODULE_NAME); | 
 | 14302 | 	if (err) { | 
 | 14303 | 		printk(KERN_ERR PFX "Cannot obtain PCI resources, " | 
 | 14304 | 		       "aborting.\n"); | 
 | 14305 | 		goto err_out_disable_pdev; | 
 | 14306 | 	} | 
 | 14307 |  | 
 | 14308 | 	pci_set_master(pdev); | 
 | 14309 |  | 
 | 14310 | 	/* Find power-management capability. */ | 
 | 14311 | 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | 
 | 14312 | 	if (pm_cap == 0) { | 
 | 14313 | 		printk(KERN_ERR PFX "Cannot find PowerManagement capability, " | 
 | 14314 | 		       "aborting.\n"); | 
 | 14315 | 		err = -EIO; | 
 | 14316 | 		goto err_out_free_res; | 
 | 14317 | 	} | 
 | 14318 |  | 
| Matt Carlson | fe5f578 | 2009-09-01 13:09:39 +0000 | [diff] [blame] | 14319 | 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14320 | 	if (!dev) { | 
 | 14321 | 		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | 
 | 14322 | 		err = -ENOMEM; | 
 | 14323 | 		goto err_out_free_res; | 
 | 14324 | 	} | 
 | 14325 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14326 | 	SET_NETDEV_DEV(dev, &pdev->dev); | 
 | 14327 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14328 | #if TG3_VLAN_TAG_USED | 
 | 14329 | 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14330 | #endif | 
 | 14331 |  | 
 | 14332 | 	tp = netdev_priv(dev); | 
 | 14333 | 	tp->pdev = pdev; | 
 | 14334 | 	tp->dev = dev; | 
 | 14335 | 	tp->pm_cap = pm_cap; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14336 | 	tp->rx_mode = TG3_DEF_RX_MODE; | 
 | 14337 | 	tp->tx_mode = TG3_DEF_TX_MODE; | 
| Matt Carlson | 8ef2142 | 2008-05-02 16:47:53 -0700 | [diff] [blame] | 14338 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14339 | 	if (tg3_debug > 0) | 
 | 14340 | 		tp->msg_enable = tg3_debug; | 
 | 14341 | 	else | 
 | 14342 | 		tp->msg_enable = TG3_DEF_MSG_ENABLE; | 
 | 14343 |  | 
 | 14344 | 	/* The word/byte swap controls here control register access byte | 
 | 14345 | 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE | 
 | 14346 | 	 * setting below. | 
 | 14347 | 	 */ | 
 | 14348 | 	tp->misc_host_ctrl = | 
 | 14349 | 		MISC_HOST_CTRL_MASK_PCI_INT | | 
 | 14350 | 		MISC_HOST_CTRL_WORD_SWAP | | 
 | 14351 | 		MISC_HOST_CTRL_INDIR_ACCESS | | 
 | 14352 | 		MISC_HOST_CTRL_PCISTATE_RW; | 
 | 14353 |  | 
 | 14354 | 	/* The NONFRM (non-frame) byte/word swap controls take effect | 
 | 14355 | 	 * on descriptor entries, anything which isn't packet data. | 
 | 14356 | 	 * | 
 | 14357 | 	 * The StrongARM chips on the board (one for tx, one for rx) | 
 | 14358 | 	 * are running in big-endian mode. | 
 | 14359 | 	 */ | 
 | 14360 | 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | | 
 | 14361 | 			GRC_MODE_WSWAP_NONFRM_DATA); | 
 | 14362 | #ifdef __BIG_ENDIAN | 
 | 14363 | 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; | 
 | 14364 | #endif | 
 | 14365 | 	spin_lock_init(&tp->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14366 | 	spin_lock_init(&tp->indirect_lock); | 
| David Howells | c402895 | 2006-11-22 14:57:56 +0000 | [diff] [blame] | 14367 | 	INIT_WORK(&tp->reset_task, tg3_reset_task); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14368 |  | 
| Matt Carlson | d5fe488 | 2008-11-21 17:20:32 -0800 | [diff] [blame] | 14369 | 	tp->regs = pci_ioremap_bar(pdev, BAR_0); | 
| Andy Gospodarek | ab0049b | 2007-09-06 20:42:14 +0100 | [diff] [blame] | 14370 | 	if (!tp->regs) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14371 | 		printk(KERN_ERR PFX "Cannot map device registers, " | 
 | 14372 | 		       "aborting.\n"); | 
 | 14373 | 		err = -ENOMEM; | 
 | 14374 | 		goto err_out_free_dev; | 
 | 14375 | 	} | 
 | 14376 |  | 
 | 14377 | 	tg3_init_link_config(tp); | 
 | 14378 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14379 | 	tp->rx_pending = TG3_DEF_RX_RING_PENDING; | 
 | 14380 | 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14381 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14382 | 	dev->ethtool_ops = &tg3_ethtool_ops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14383 | 	dev->watchdog_timeo = TG3_TX_TIMEOUT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14384 | 	dev->irq = pdev->irq; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14385 |  | 
 | 14386 | 	err = tg3_get_invariants(tp); | 
 | 14387 | 	if (err) { | 
 | 14388 | 		printk(KERN_ERR PFX "Problem fetching invariants of chip, " | 
 | 14389 | 		       "aborting.\n"); | 
 | 14390 | 		goto err_out_iounmap; | 
 | 14391 | 	} | 
 | 14392 |  | 
| Matt Carlson | 615774f | 2009-11-13 13:03:39 +0000 | [diff] [blame] | 14393 | 	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && | 
 | 14394 | 	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) | 
| Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 14395 | 		dev->netdev_ops = &tg3_netdev_ops; | 
 | 14396 | 	else | 
 | 14397 | 		dev->netdev_ops = &tg3_netdev_ops_dma_bug; | 
 | 14398 |  | 
 | 14399 |  | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 14400 | 	/* The EPB bridge inside the 5714, 5715, and 5780, and any | 
 | 14401 | 	 * device behind the EPB, cannot support DMA addresses wider than 40 bits. | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14402 | 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask. | 
 | 14403 | 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and | 
 | 14404 | 	 * do DMA address check in tg3_start_xmit(). | 
 | 14405 | 	 */ | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 14406 | 	if (tp->tg3_flags2 & TG3_FLG2_IS_5788) | 
| Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 14407 | 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32); | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 14408 | 	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) { | 
| Yang Hongyang | 50cf156 | 2009-04-06 19:01:14 -0700 | [diff] [blame] | 14409 | 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40); | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14410 | #ifdef CONFIG_HIGHMEM | 
| Yang Hongyang | 6a35528 | 2009-04-06 19:01:13 -0700 | [diff] [blame] | 14411 | 		dma_mask = DMA_BIT_MASK(64); | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14412 | #endif | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 14413 | 	} else | 
| Yang Hongyang | 6a35528 | 2009-04-06 19:01:13 -0700 | [diff] [blame] | 14414 | 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64); | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14415 |  | 
 | 14416 | 	/* Configure DMA attributes. */ | 
| Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 14417 | 	if (dma_mask > DMA_BIT_MASK(32)) { | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14418 | 		err = pci_set_dma_mask(pdev, dma_mask); | 
 | 14419 | 		if (!err) { | 
 | 14420 | 			dev->features |= NETIF_F_HIGHDMA; | 
 | 14421 | 			err = pci_set_consistent_dma_mask(pdev, | 
 | 14422 | 							  persist_dma_mask); | 
 | 14423 | 			if (err < 0) { | 
 | 14424 | 				printk(KERN_ERR PFX "Unable to obtain 64 bit " | 
 | 14425 | 				       "DMA for consistent allocations\n"); | 
 | 14426 | 				goto err_out_iounmap; | 
 | 14427 | 			} | 
 | 14428 | 		} | 
 | 14429 | 	} | 
| Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 14430 | 	if (err || dma_mask == DMA_BIT_MASK(32)) { | 
 | 14431 | 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 
| Michael Chan | 72f2afb | 2006-03-06 19:28:35 -0800 | [diff] [blame] | 14432 | 		if (err) { | 
 | 14433 | 			printk(KERN_ERR PFX "No usable DMA configuration, " | 
 | 14434 | 			       "aborting.\n"); | 
 | 14435 | 			goto err_out_iounmap; | 
 | 14436 | 		} | 
 | 14437 | 	} | 
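
	/* Net effect of the mask selection above, restated as a summary:
	 *   - TG3_FLG2_IS_5788:        32-bit streaming and coherent masks;
	 *   - TG3_FLAG_40BIT_DMA_BUG:  40-bit coherent mask, with the streaming
	 *                              mask widened to 64 bits only under
	 *                              CONFIG_HIGHMEM (out-of-range addresses are
	 *                              then handled in tg3_start_xmit(), per the
	 *                              comment above);
	 *   - all other chips:         full 64-bit masks.
	 * In every case a failed pci_set_dma_mask() drops the device back to
	 * plain 32-bit DMA, and a failure there aborts the probe.
	 */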
 | 14438 |  | 
| Michael Chan | fdfec17 | 2005-07-25 12:31:48 -0700 | [diff] [blame] | 14439 | 	tg3_init_bufmgr_config(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14440 |  | 
| Matt Carlson | 507399f | 2009-11-13 13:03:37 +0000 | [diff] [blame] | 14441 | 	/* Selectively allow TSO based on operating conditions */ | 
 | 14442 | 	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | 
 | 14443 | 	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) | 
 | 14444 | 		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 
 | 14445 | 	else { | 
 | 14446 | 		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); | 
 | 14447 | 		tp->fw_needed = NULL; | 
 | 14448 | 	} | 
 | 14449 |  | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 14450 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) | 
| Matt Carlson | 9e9fd12 | 2009-01-19 16:57:45 -0800 | [diff] [blame] | 14451 | 		tp->fw_needed = FIRMWARE_TG3; | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 14452 |  | 
| Michael Chan | 4e3a7aa | 2006-03-20 17:47:44 -0800 | [diff] [blame] | 14453 | 	/* TSO is on by default on chips that support hardware TSO. | 
 | 14454 | 	 * Firmware TSO on older chips gives lower performance, so it | 
 | 14455 | 	 * is off by default, but can be enabled using ethtool. | 
 | 14456 | 	 */ | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 14457 | 	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && | 
 | 14458 | 	    (dev->features & NETIF_F_IP_CSUM)) | 
 | 14459 | 		dev->features |= NETIF_F_TSO; | 
 | 14460 |  | 
 | 14461 | 	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || | 
 | 14462 | 	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { | 
 | 14463 | 		if (dev->features & NETIF_F_IPV6_CSUM) | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 14464 | 			dev->features |= NETIF_F_TSO6; | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 14465 | 		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || | 
 | 14466 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 
| Matt Carlson | 57e6983 | 2008-05-25 23:48:31 -0700 | [diff] [blame] | 14467 | 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 
 | 14468 | 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 
| Matt Carlson | 321d32a | 2008-11-21 17:22:19 -0800 | [diff] [blame] | 14469 | 			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 
| Matt Carlson | e849cdc | 2009-11-13 13:03:38 +0000 | [diff] [blame] | 14470 | 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 
| Matt Carlson | 9936bcf | 2007-10-10 18:03:07 -0700 | [diff] [blame] | 14471 | 			dev->features |= NETIF_F_TSO_ECN; | 
| Michael Chan | b002662 | 2006-07-03 19:42:14 -0700 | [diff] [blame] | 14472 | 	} | 
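
	/* As the comment above notes, firmware-based TSO on the older chips is
	 * off by default but can be toggled from user space; assuming the
	 * standard ethtool utility and an interface named eth0 (illustrative),
	 * that is
	 *
	 *	ethtool -K eth0 tso on
	 *
	 * which the kernel passes to the driver through its ethtool operations.
	 */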
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14473 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14474 | 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && | 
 | 14475 | 	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 
 | 14476 | 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { | 
 | 14477 | 		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64; | 
 | 14478 | 		tp->rx_pending = 63; | 
 | 14479 | 	} | 
 | 14480 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14481 | 	err = tg3_get_device_address(tp); | 
 | 14482 | 	if (err) { | 
 | 14483 | 		printk(KERN_ERR PFX "Could not obtain valid ethernet address, " | 
 | 14484 | 		       "aborting.\n"); | 
| Matt Carlson | 026a6c2 | 2009-12-03 08:36:24 +0000 | [diff] [blame] | 14485 | 		goto err_out_iounmap; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14486 | 	} | 
 | 14487 |  | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14488 | 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 
| Matt Carlson | 6353239 | 2008-11-03 16:49:57 -0800 | [diff] [blame] | 14489 | 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2); | 
| Al Viro | 79ea13c | 2008-01-24 02:06:46 -0800 | [diff] [blame] | 14490 | 		if (!tp->aperegs) { | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14491 | 			printk(KERN_ERR PFX "Cannot map APE registers, " | 
 | 14492 | 			       "aborting.\n"); | 
 | 14493 | 			err = -ENOMEM; | 
| Matt Carlson | 026a6c2 | 2009-12-03 08:36:24 +0000 | [diff] [blame] | 14494 | 			goto err_out_iounmap; | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14495 | 		} | 
 | 14496 |  | 
 | 14497 | 		tg3_ape_lock_init(tp); | 
| Matt Carlson | 7fd7644 | 2009-02-25 14:27:20 +0000 | [diff] [blame] | 14498 |  | 
 | 14499 | 		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) | 
 | 14500 | 			tg3_read_dash_ver(tp); | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14501 | 	} | 
 | 14502 |  | 
| Matt Carlson | c88864d | 2007-11-12 21:07:01 -0800 | [diff] [blame] | 14503 | 	/* | 
 | 14504 | 	 * Reset the chip in case the UNDI or EFI driver did not shut it down; | 
 | 14505 | 	 * the DMA self test will enable the WDMAC and we'll see (spurious) | 
 | 14506 | 	 * pending DMA on the PCI bus at that point. | 
 | 14507 | 	 */ | 
 | 14508 | 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || | 
 | 14509 | 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | 
 | 14510 | 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); | 
 | 14511 | 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
 | 14512 | 	} | 
 | 14513 |  | 
 | 14514 | 	err = tg3_test_dma(tp); | 
 | 14515 | 	if (err) { | 
 | 14516 | 		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n"); | 
 | 14517 | 		goto err_out_apeunmap; | 
 | 14518 | 	} | 
 | 14519 |  | 
| Matt Carlson | c88864d | 2007-11-12 21:07:01 -0800 | [diff] [blame] | 14520 | 	/* flow control autonegotiation is default behavior */ | 
 | 14521 | 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 
| Steve Glendinning | e18ce34 | 2008-12-16 02:00:00 -0800 | [diff] [blame] | 14522 | 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | 
| Matt Carlson | c88864d | 2007-11-12 21:07:01 -0800 | [diff] [blame] | 14523 |  | 
| Matt Carlson | 78f90dc | 2009-11-13 13:03:42 +0000 | [diff] [blame] | 14524 | 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | 
 | 14525 | 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | 
 | 14526 | 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | 
 | 14527 | 	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { | 
 | 14528 | 		struct tg3_napi *tnapi = &tp->napi[i]; | 
 | 14529 |  | 
 | 14530 | 		tnapi->tp = tp; | 
 | 14531 | 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; | 
 | 14532 |  | 
 | 14533 | 		tnapi->int_mbox = intmbx; | 
 | 14534 | 		if (i < 4) | 
 | 14535 | 			intmbx += 0x8; | 
 | 14536 | 		else | 
 | 14537 | 			intmbx += 0x4; | 
 | 14538 |  | 
 | 14539 | 		tnapi->consmbox = rcvmbx; | 
 | 14540 | 		tnapi->prodmbox = sndmbx; | 
 | 14541 |  | 
 | 14542 | 		if (i) { | 
 | 14543 | 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); | 
 | 14544 | 			netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); | 
 | 14545 | 		} else { | 
 | 14546 | 			tnapi->coal_now = HOSTCC_MODE_NOW; | 
 | 14547 | 			netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); | 
 | 14548 | 		} | 
 | 14549 |  | 
 | 14550 | 		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) | 
 | 14551 | 			break; | 
 | 14552 |  | 
 | 14553 | 		/* | 
 | 14554 | 		 * If we support MSIX, we'll be using RSS.  If we're using | 
 | 14555 | 		 * RSS, the first vector only handles link interrupts and the | 
 | 14556 | 		 * remaining vectors handle rx and tx interrupts.  Reuse the | 
 | 14557 | 		 * mailbox values for the next iteration.  The values we set up | 
 | 14558 | 		 * above are still useful for the single-vectored mode. | 
 | 14559 | 		 */ | 
 | 14560 | 		if (!i) | 
 | 14561 | 			continue; | 
 | 14562 |  | 
 | 14563 | 		rcvmbx += 0x8; | 
 | 14564 |  | 
 | 14565 | 		if (sndmbx & 0x4) | 
 | 14566 | 			sndmbx -= 0x4; | 
 | 14567 | 		else | 
 | 14568 | 			sndmbx += 0xc; | 
 | 14569 | 	} | 
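
	/* Note on the loop above, derived from the increments themselves:
	 * because the rcvmbx/sndmbx adjustments are skipped on the i == 0 pass,
	 * vector 1 (the first rx/tx vector in RSS mode) inherits the same
	 * return-ring and producer mailboxes that were just assigned to
	 * vector 0; only vectors 2 and up move on to new mailbox offsets.
	 * Without MSI-X support the loop simply stops after the first vector.
	 */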
 | 14570 |  | 
| Matt Carlson | c88864d | 2007-11-12 21:07:01 -0800 | [diff] [blame] | 14571 | 	tg3_init_coal(tp); | 
 | 14572 |  | 
| Michael Chan | c49a156 | 2006-12-17 17:07:29 -0800 | [diff] [blame] | 14573 | 	pci_set_drvdata(pdev, dev); | 
 | 14574 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14575 | 	err = register_netdev(dev); | 
 | 14576 | 	if (err) { | 
 | 14577 | 		printk(KERN_ERR PFX "Cannot register net device, " | 
 | 14578 | 		       "aborting.\n"); | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14579 | 		goto err_out_apeunmap; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14580 | 	} | 
 | 14581 |  | 
| Matt Carlson | df59c94 | 2008-11-03 16:52:56 -0800 | [diff] [blame] | 14582 | 	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14583 | 	       dev->name, | 
 | 14584 | 	       tp->board_part_number, | 
 | 14585 | 	       tp->pci_chip_rev_id, | 
| Michael Chan | f9804dd | 2005-09-27 12:13:10 -0700 | [diff] [blame] | 14586 | 	       tg3_bus_string(tp, str), | 
| Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 14587 | 	       dev->dev_addr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14588 |  | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 14589 | 	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | 
 | 14590 | 		struct phy_device *phydev; | 
 | 14591 | 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | 
| Matt Carlson | df59c94 | 2008-11-03 16:52:56 -0800 | [diff] [blame] | 14592 | 		printk(KERN_INFO | 
 | 14593 | 		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", | 
| Matt Carlson | 3f0e3ad | 2009-11-02 14:24:36 +0000 | [diff] [blame] | 14594 | 		       tp->dev->name, phydev->drv->name, | 
 | 14595 | 		       dev_name(&phydev->dev)); | 
 | 14596 | 	} else | 
| Matt Carlson | df59c94 | 2008-11-03 16:52:56 -0800 | [diff] [blame] | 14597 | 		printk(KERN_INFO | 
 | 14598 | 		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", | 
 | 14599 | 		       tp->dev->name, tg3_phy_string(tp), | 
 | 14600 | 		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : | 
 | 14601 | 			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : | 
 | 14602 | 			 "10/100/1000Base-T")), | 
 | 14603 | 		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0); | 
 | 14604 |  | 
 | 14605 | 	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14606 | 	       dev->name, | 
 | 14607 | 	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, | 
 | 14608 | 	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, | 
 | 14609 | 	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, | 
 | 14610 | 	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14611 | 	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); | 
| Michael Chan | 4a29cc2 | 2006-03-19 13:21:12 -0800 | [diff] [blame] | 14612 | 	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n", | 
 | 14613 | 	       dev->name, tp->dma_rwctrl, | 
| Yang Hongyang | 284901a | 2009-04-06 19:01:15 -0700 | [diff] [blame] | 14614 | 	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 : | 
| Yang Hongyang | 50cf156 | 2009-04-06 19:01:14 -0700 | [diff] [blame] | 14615 | 	        (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14616 |  | 
 | 14617 | 	return 0; | 
 | 14618 |  | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14619 | err_out_apeunmap: | 
 | 14620 | 	if (tp->aperegs) { | 
 | 14621 | 		iounmap(tp->aperegs); | 
 | 14622 | 		tp->aperegs = NULL; | 
 | 14623 | 	} | 
 | 14624 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14625 | err_out_iounmap: | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 14626 | 	if (tp->regs) { | 
 | 14627 | 		iounmap(tp->regs); | 
| Peter Hagervall | 22abe31 | 2005-09-16 17:01:03 -0700 | [diff] [blame] | 14628 | 		tp->regs = NULL; | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 14629 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14630 |  | 
 | 14631 | err_out_free_dev: | 
 | 14632 | 	free_netdev(dev); | 
 | 14633 |  | 
 | 14634 | err_out_free_res: | 
 | 14635 | 	pci_release_regions(pdev); | 
 | 14636 |  | 
 | 14637 | err_out_disable_pdev: | 
 | 14638 | 	pci_disable_device(pdev); | 
 | 14639 | 	pci_set_drvdata(pdev, NULL); | 
 | 14640 | 	return err; | 
 | 14641 | } | 
 | 14642 |  | 
 | 14643 | static void __devexit tg3_remove_one(struct pci_dev *pdev) | 
 | 14644 | { | 
 | 14645 | 	struct net_device *dev = pci_get_drvdata(pdev); | 
 | 14646 |  | 
 | 14647 | 	if (dev) { | 
 | 14648 | 		struct tg3 *tp = netdev_priv(dev); | 
 | 14649 |  | 
| Jaswinder Singh Rajput | 077f849 | 2009-01-04 16:11:25 -0800 | [diff] [blame] | 14650 | 		if (tp->fw) | 
 | 14651 | 			release_firmware(tp->fw); | 
 | 14652 |  | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 14653 | 		flush_scheduled_work(); | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 14654 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14655 | 		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 
 | 14656 | 			tg3_phy_fini(tp); | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 14657 | 			tg3_mdio_fini(tp); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14658 | 		} | 
| Matt Carlson | 158d7ab | 2008-05-29 01:37:54 -0700 | [diff] [blame] | 14659 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14660 | 		unregister_netdev(dev); | 
| Matt Carlson | 0d3031d | 2007-10-10 18:02:43 -0700 | [diff] [blame] | 14661 | 		if (tp->aperegs) { | 
 | 14662 | 			iounmap(tp->aperegs); | 
 | 14663 | 			tp->aperegs = NULL; | 
 | 14664 | 		} | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 14665 | 		if (tp->regs) { | 
 | 14666 | 			iounmap(tp->regs); | 
| Peter Hagervall | 22abe31 | 2005-09-16 17:01:03 -0700 | [diff] [blame] | 14667 | 			tp->regs = NULL; | 
| Michael Chan | 6892914 | 2005-08-09 20:17:14 -0700 | [diff] [blame] | 14668 | 		} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14669 | 		free_netdev(dev); | 
 | 14670 | 		pci_release_regions(pdev); | 
 | 14671 | 		pci_disable_device(pdev); | 
 | 14672 | 		pci_set_drvdata(pdev, NULL); | 
 | 14673 | 	} | 
 | 14674 | } | 
 | 14675 |  | 
 | 14676 | static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | 
 | 14677 | { | 
 | 14678 | 	struct net_device *dev = pci_get_drvdata(pdev); | 
 | 14679 | 	struct tg3 *tp = netdev_priv(dev); | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 14680 | 	pci_power_t target_state; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14681 | 	int err; | 
 | 14682 |  | 
| Michael Chan | 3e0c95f | 2007-08-03 20:56:54 -0700 | [diff] [blame] | 14683 | 	/* PCI register 4 needs to be saved whether netif_running() or not. | 
 | 14684 | 	 * MSI address and data need to be saved if using MSI and | 
 | 14685 | 	 * netif_running(). | 
 | 14686 | 	 */ | 
 | 14687 | 	pci_save_state(pdev); | 
 | 14688 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14689 | 	if (!netif_running(dev)) | 
 | 14690 | 		return 0; | 
 | 14691 |  | 
| Michael Chan | 7faa006 | 2006-02-02 17:29:28 -0800 | [diff] [blame] | 14692 | 	flush_scheduled_work(); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14693 | 	tg3_phy_stop(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14694 | 	tg3_netif_stop(tp); | 
 | 14695 |  | 
 | 14696 | 	del_timer_sync(&tp->timer); | 
 | 14697 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14698 | 	tg3_full_lock(tp, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14699 | 	tg3_disable_ints(tp); | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14700 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14701 |  | 
 | 14702 | 	netif_device_detach(dev); | 
 | 14703 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14704 | 	tg3_full_lock(tp, 0); | 
| Michael Chan | 944d980 | 2005-05-29 14:57:48 -0700 | [diff] [blame] | 14705 | 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 
| Michael Chan | 6a9eba1 | 2005-12-13 21:08:58 -0800 | [diff] [blame] | 14706 | 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14707 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14708 |  | 
| Rafael J. Wysocki | 12dac07 | 2008-07-30 16:37:33 -0700 | [diff] [blame] | 14709 | 	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot; | 
 | 14710 |  | 
 | 14711 | 	err = tg3_set_power_state(tp, target_state); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14712 | 	if (err) { | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14713 | 		int err2; | 
 | 14714 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14715 | 		tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14716 |  | 
| Michael Chan | 6a9eba1 | 2005-12-13 21:08:58 -0800 | [diff] [blame] | 14717 | 		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14718 | 		err2 = tg3_restart_hw(tp, 1); | 
 | 14719 | 		if (err2) | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 14720 | 			goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14721 |  | 
 | 14722 | 		tp->timer.expires = jiffies + tp->timer_offset; | 
 | 14723 | 		add_timer(&tp->timer); | 
 | 14724 |  | 
 | 14725 | 		netif_device_attach(dev); | 
 | 14726 | 		tg3_netif_start(tp); | 
 | 14727 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 14728 | out: | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14729 | 		tg3_full_unlock(tp); | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14730 |  | 
 | 14731 | 		if (!err2) | 
 | 14732 | 			tg3_phy_start(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14733 | 	} | 
 | 14734 |  | 
 | 14735 | 	return err; | 
 | 14736 | } | 
 | 14737 |  | 
 | 14738 | static int tg3_resume(struct pci_dev *pdev) | 
 | 14739 | { | 
 | 14740 | 	struct net_device *dev = pci_get_drvdata(pdev); | 
 | 14741 | 	struct tg3 *tp = netdev_priv(dev); | 
 | 14742 | 	int err; | 
 | 14743 |  | 
| Michael Chan | 3e0c95f | 2007-08-03 20:56:54 -0700 | [diff] [blame] | 14744 | 	pci_restore_state(tp->pdev); | 
 | 14745 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14746 | 	if (!netif_running(dev)) | 
 | 14747 | 		return 0; | 
 | 14748 |  | 
| Michael Chan | bc1c756 | 2006-03-20 17:48:03 -0800 | [diff] [blame] | 14749 | 	err = tg3_set_power_state(tp, PCI_D0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14750 | 	if (err) | 
 | 14751 | 		return err; | 
 | 14752 |  | 
 | 14753 | 	netif_device_attach(dev); | 
 | 14754 |  | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14755 | 	tg3_full_lock(tp, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14756 |  | 
| Michael Chan | 6a9eba1 | 2005-12-13 21:08:58 -0800 | [diff] [blame] | 14757 | 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 14758 | 	err = tg3_restart_hw(tp, 1); | 
 | 14759 | 	if (err) | 
 | 14760 | 		goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14761 |  | 
 | 14762 | 	tp->timer.expires = jiffies + tp->timer_offset; | 
 | 14763 | 	add_timer(&tp->timer); | 
 | 14764 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14765 | 	tg3_netif_start(tp); | 
 | 14766 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 14767 | out: | 
| David S. Miller | f47c11e | 2005-06-24 20:18:35 -0700 | [diff] [blame] | 14768 | 	tg3_full_unlock(tp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14769 |  | 
| Matt Carlson | b02fd9e | 2008-05-25 23:47:41 -0700 | [diff] [blame] | 14770 | 	if (!err) | 
 | 14771 | 		tg3_phy_start(tp); | 
 | 14772 |  | 
| Michael Chan | b9ec6c1 | 2006-07-25 16:37:27 -0700 | [diff] [blame] | 14773 | 	return err; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14774 | } | 
 | 14775 |  | 
 | 14776 | static struct pci_driver tg3_driver = { | 
 | 14777 | 	.name		= DRV_MODULE_NAME, | 
 | 14778 | 	.id_table	= tg3_pci_tbl, | 
 | 14779 | 	.probe		= tg3_init_one, | 
 | 14780 | 	.remove		= __devexit_p(tg3_remove_one), | 
 | 14781 | 	.suspend	= tg3_suspend, | 
 | 14782 | 	.resume		= tg3_resume | 
 | 14783 | }; | 
 | 14784 |  | 
 | 14785 | static int __init tg3_init(void) | 
 | 14786 | { | 
| Jeff Garzik | 2991762 | 2006-08-19 17:48:59 -0400 | [diff] [blame] | 14787 | 	return pci_register_driver(&tg3_driver); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14788 | } | 
 | 14789 |  | 
 | 14790 | static void __exit tg3_cleanup(void) | 
 | 14791 | { | 
 | 14792 | 	pci_unregister_driver(&tg3_driver); | 
 | 14793 | } | 
 | 14794 |  | 
 | 14795 | module_init(tg3_init); | 
 | 14796 | module_exit(tg3_cleanup); |
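
/* Usage sketch: loading is normally automatic via the PCI ID table, but the
 * driver can also be loaded by hand with a non-default message mask, e.g.
 *
 *	modprobe tg3 tg3_debug=0x7fff
 *
 * (this assumes tg3_debug is exported as a module parameter earlier in the
 * file, as its use in tg3_init_one() suggests; the mask value is purely
 * illustrative).  Chips flagged with tp->fw_needed, such as the 5701 A0
 * case above that uses FIRMWARE_TG3, additionally obtain their firmware
 * through the request_firmware() interface, which is why tg3_remove_one()
 * calls release_firmware().
 */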