/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver has been significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid holding most of them for such long periods of time (and schedule
 *    instead). The main issues at this point are caused by the netdev layer
 *    though:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    held by net/core/dev.c, thus they can't schedule. That means they can't
 *    call netif_poll_disable() either, thus forcing gem_poll() to keep a spinlock
 *    where it could have been dropped. change_mtu especially would also love to
 *    be able to msleep instead of horrid locked delays when resetting the HW,
 *    but that read_lock() makes it impossible, unless I defer its action to
 *    the reset task, which means it'll be asynchronous (won't take effect until
 *    the system schedules a bit).
 *
 *    Also, it would probably be possible to remove most of the long-life
 *    locking in the open/resume code path (gem_reinit_chip) by being more careful
 *    about when we can start taking interrupts or get xmit() called...
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef __sparc__
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/pbm.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem_phy.h"
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"0.98"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"

static char version[] __devinitdata =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"
#define PFX GEM_MODULE_NAME ": "

static struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

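/* Low-level MII management access.  Build a read frame (start-of-frame,
 * read opcode, PHY address, register address, turnaround MSB), write it
 * to MIF_FRAME, then poll for the turnaround LSB that the MIF sets once
 * the PHY has returned the data.  Returns 0xffff on timeout.
 */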
static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = dev->priv;
	return __phy_read(gp, mii_id, reg);
}

static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}

static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = dev->priv;
	__phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
				"RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
				dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
			dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
			dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Deferred timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
			dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
			dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	udelay(5000);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
		       "whole chip.\n", dev->name);
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
			       "whole chip.\n", dev->name);
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

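/* Service RX MAC status.  A fifo overflow usually means the RX unit is
 * wedged, so try gem_rxmac_reset() and let the caller reset the whole
 * chip if that fails.  The remaining bits are 16-bit hardware counters
 * overflowing into our software statistics.
 */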
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
				dev->name, smac);
		gp->net_stats.rx_over_errors++;
		gp->net_stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		gp->net_stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		gp->net_stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		gp->net_stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking.  It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
			dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
			dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
				dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
				dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
				dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
				dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
				dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
				dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}

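/* Reclaim completed TX descriptors.  Walk the ring from tx_old up to the
 * completion index the chip reported in gem_status, skipping any
 * multi-fragment skb whose last fragment has not completed yet; unmap
 * each buffer, free the skb and wake the queue once enough slots are
 * available again.
 */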
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}

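/* Hand cleaned RX descriptors back to the chip.  Descriptors are
 * re-marked as chip-owned in clusters of four, and RXDMA_KICK is only
 * advanced past a fully refreshed cluster so the chip never sees a
 * partially initialized group.
 */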
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}

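/* NAPI receive path, called without locks.  Process up to work_to_do
 * completed RX descriptors: large frames get a freshly allocated
 * replacement buffer, small ones are copied into a new skb so the ring
 * buffer can be reused, then the ring is replenished via gem_post_rxds().
 */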
static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = cpu_to_le64(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address.  We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (len & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = cpu_to_le64(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = gp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->ip_summed = CHECKSUM_HW;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;
		gp->dev->last_rx = jiffies;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}

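/* NAPI poll callback.  Handle abnormal chip status first, reap TX
 * completions under tx_lock, then run the RX path with no lock held (see
 * the locking notes at the top of this file), looping until the chip
 * reports no more NAPI-relevant status or the budget is exhausted.
 */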
static int gem_poll(struct net_device *dev, int *budget)
{
	struct gem *gp = dev->priv;
	unsigned long flags;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	do {
		int work_to_do, work_done;

		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call netif_poll_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_to_do = min(*budget, dev->quota);

		work_done = gem_rx(gp, work_to_do);

		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__netif_rx_complete(dev);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);
	return 0;
}

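/* Hardware interrupt handler.  Latch the interrupt status in gp->status,
 * mask further chip interrupts and defer the real work to gem_poll() by
 * scheduling NAPI; a zero status means the interrupt was not ours.
 */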
static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gem *gp = dev->priv;
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (netif_rx_schedule_prep(dev)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			netif_poll_enable(dev);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__netif_rx_schedule(dev);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt is safe against reentrance so no need
	 * to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev, NULL);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk("%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

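/* Queue a packet for transmission.  Build the checksum-offload control
 * bits, try-lock tx_lock (returning NETDEV_TX_LOCKED so the core requeues
 * on contention), then fill one descriptor for a linear skb or one per
 * fragment, giving the first chunk to the chip last to avoid racing it.
 */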
static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gem *gp = dev->priv;
	int entry;
	u64 ctrl;
	unsigned long flags;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		u64 csum_start_off, csum_stuff_off;

		csum_start_off = (u64) (skb->h.raw - skb->data);
		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	local_irq_save(flags);
	if (!spin_trylock(&gp->tx_lock)) {
		/* Tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

#define STOP_TRIES 32

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit <= 0)
		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
 * actually stopped before about 4ms though ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}


/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & PCS_MIIADV_FD)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
			gp->dev->name, speed, (full_duplex ? "full" : "half"));

	if (!gp->running)
		return 0;

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode.  Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
				"(rxfifo: %d off: %d on: %d)\n",
				gp->dev->name,
				gp->rx_fifo_sz,
				gp->rx_pause_off,
				gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
				gp->dev->name);
		}
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have the "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thingy. On these, we just restart aneg
		 */
|  | 1437 | if (gp->phy_mii.def->magic_aneg) | 
|  | 1438 | return 1; | 
|  | 1439 | if (netif_msg_link(gp)) | 
|  | 1440 | printk(KERN_INFO "%s: switching to forced 100bt\n", | 
|  | 1441 | gp->dev->name); | 
|  | 1442 | /* Try forced modes. */ | 
|  | 1443 | gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, | 
|  | 1444 | DUPLEX_HALF); | 
|  | 1445 | gp->timer_ticks = 5; | 
|  | 1446 | gp->lstate = link_force_try; | 
|  | 1447 | return 0; | 
|  | 1448 | case link_force_try: | 
|  | 1449 | /* Downgrade from 100 to 10 Mbps if necessary. | 
|  | 1450 | * If already at 10Mbps, warn user about the | 
|  | 1451 | * situation every 10 ticks. | 
|  | 1452 | */ | 
|  | 1453 | if (gp->phy_mii.speed == SPEED_100) { | 
|  | 1454 | gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, | 
|  | 1455 | DUPLEX_HALF); | 
|  | 1456 | gp->timer_ticks = 5; | 
|  | 1457 | if (netif_msg_link(gp)) | 
|  | 1458 | printk(KERN_INFO "%s: switching to forced 10bt\n", | 
|  | 1459 | gp->dev->name); | 
|  | 1460 | return 0; | 
|  | 1461 | } else | 
|  | 1462 | return 1; | 
|  | 1463 | default: | 
|  | 1464 | return 0; | 
|  | 1465 | } | 
|  | 1466 | } | 
|  | 1467 |  | 
|  | 1468 | static void gem_link_timer(unsigned long data) | 
|  | 1469 | { | 
|  | 1470 | struct gem *gp = (struct gem *) data; | 
|  | 1471 | int restart_aneg = 0; | 
|  | 1472 |  | 
|  | 1473 | if (gp->asleep) | 
|  | 1474 | return; | 
|  | 1475 |  | 
|  | 1476 | spin_lock_irq(&gp->lock); | 
|  | 1477 | spin_lock(&gp->tx_lock); | 
|  | 1478 | gem_get_cell(gp); | 
|  | 1479 |  | 
|  | 1480 | /* If the reset task is still pending, we just | 
|  | 1481 | * reschedule the link timer | 
|  | 1482 | */ | 
|  | 1483 | if (gp->reset_task_pending) | 
|  | 1484 | goto restart; | 
|  | 1485 |  | 
|  | 1486 | if (gp->phy_type == phy_serialink || | 
|  | 1487 | gp->phy_type == phy_serdes) { | 
|  | 1488 | u32 val = readl(gp->regs + PCS_MIISTAT); | 
|  | 1489 |  | 
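|  |  | /* PCS_MIISTAT_LS is presumably latched low on a link loss (like the | 
|  |  | * MII BMSR link bit), hence the second read to get the current state. | 
|  |  | */ | 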
|  | 1490 | if (!(val & PCS_MIISTAT_LS)) | 
|  | 1491 | val = readl(gp->regs + PCS_MIISTAT); | 
|  | 1492 |  | 
|  | 1493 | if ((val & PCS_MIISTAT_LS) != 0) { | 
|  | 1494 | gp->lstate = link_up; | 
|  | 1495 | netif_carrier_on(gp->dev); | 
|  | 1496 | (void)gem_set_link_modes(gp); | 
|  | 1497 | } | 
|  | 1498 | goto restart; | 
|  | 1499 | } | 
|  | 1500 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { | 
|  | 1501 | /* Ok, here we got a link. If we had it due to a forced | 
|  | 1502 | * fallback, and we were configured for autoneg, we do | 
|  | 1503 | * retry a short autoneg pass. If you know your hub is | 
|  | 1504 | * broken, use ethtool ;) | 
|  | 1505 | */ | 
|  | 1506 | if (gp->lstate == link_force_try && gp->want_autoneg) { | 
|  | 1507 | gp->lstate = link_force_ret; | 
|  | 1508 | gp->last_forced_speed = gp->phy_mii.speed; | 
|  | 1509 | gp->timer_ticks = 5; | 
|  | 1510 | if (netif_msg_link(gp)) | 
|  | 1511 | printk(KERN_INFO "%s: Got link after fallback, retrying" | 
|  | 1512 | " autoneg once...\n", gp->dev->name); | 
|  | 1513 | gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); | 
|  | 1514 | } else if (gp->lstate != link_up) { | 
|  | 1515 | gp->lstate = link_up; | 
|  | 1516 | netif_carrier_on(gp->dev); | 
|  | 1517 | if (gem_set_link_modes(gp)) | 
|  | 1518 | restart_aneg = 1; | 
|  | 1519 | } | 
|  | 1520 | } else { | 
|  | 1521 | /* If the link was previously up, we restart the | 
|  | 1522 | * whole process | 
|  | 1523 | */ | 
|  | 1524 | if (gp->lstate == link_up) { | 
|  | 1525 | gp->lstate = link_down; | 
|  | 1526 | if (netif_msg_link(gp)) | 
|  | 1527 | printk(KERN_INFO "%s: Link down\n", | 
|  | 1528 | gp->dev->name); | 
|  | 1529 | netif_carrier_off(gp->dev); | 
|  | 1530 | gp->reset_task_pending = 1; | 
|  | 1531 | schedule_work(&gp->reset_task); | 
|  | 1532 | restart_aneg = 1; | 
|  | 1533 | } else if (++gp->timer_ticks > 10) { | 
|  | 1534 | if (found_mii_phy(gp)) | 
|  | 1535 | restart_aneg = gem_mdio_link_not_up(gp); | 
|  | 1536 | else | 
|  | 1537 | restart_aneg = 1; | 
|  | 1538 | } | 
|  | 1539 | } | 
|  | 1540 | if (restart_aneg) { | 
|  | 1541 | gem_begin_auto_negotiation(gp, NULL); | 
|  | 1542 | goto out_unlock; | 
|  | 1543 | } | 
|  | 1544 | restart: | 
|  | 1545 | mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); | 
|  | 1546 | out_unlock: | 
|  | 1547 | gem_put_cell(gp); | 
|  | 1548 | spin_unlock(&gp->tx_lock); | 
|  | 1549 | spin_unlock_irq(&gp->lock); | 
|  | 1550 | } | 
|  | 1551 |  | 
|  | 1552 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 1553 | static void gem_clean_rings(struct gem *gp) | 
|  | 1554 | { | 
|  | 1555 | struct gem_init_block *gb = gp->init_block; | 
|  | 1556 | struct sk_buff *skb; | 
|  | 1557 | int i; | 
|  | 1558 | dma_addr_t dma_addr; | 
|  | 1559 |  | 
|  | 1560 | for (i = 0; i < RX_RING_SIZE; i++) { | 
|  | 1561 | struct gem_rxd *rxd; | 
|  | 1562 |  | 
|  | 1563 | rxd = &gb->rxd[i]; | 
|  | 1564 | if (gp->rx_skbs[i] != NULL) { | 
|  | 1565 | skb = gp->rx_skbs[i]; | 
|  | 1566 | dma_addr = le64_to_cpu(rxd->buffer); | 
|  | 1567 | pci_unmap_page(gp->pdev, dma_addr, | 
|  | 1568 | RX_BUF_ALLOC_SIZE(gp), | 
|  | 1569 | PCI_DMA_FROMDEVICE); | 
|  | 1570 | dev_kfree_skb_any(skb); | 
|  | 1571 | gp->rx_skbs[i] = NULL; | 
|  | 1572 | } | 
|  | 1573 | rxd->status_word = 0; | 
|  | 1574 | wmb(); | 
|  | 1575 | rxd->buffer = 0; | 
|  | 1576 | } | 
|  | 1577 |  | 
|  | 1578 | for (i = 0; i < TX_RING_SIZE; i++) { | 
|  | 1579 | if (gp->tx_skbs[i] != NULL) { | 
|  | 1580 | struct gem_txd *txd; | 
|  | 1581 | int frag; | 
|  | 1582 |  | 
|  | 1583 | skb = gp->tx_skbs[i]; | 
|  | 1584 | gp->tx_skbs[i] = NULL; | 
|  | 1585 |  | 
|  | 1586 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | 
|  | 1587 | int ent = i & (TX_RING_SIZE - 1); | 
|  | 1588 |  | 
|  | 1589 | txd = &gb->txd[ent]; | 
|  | 1590 | dma_addr = le64_to_cpu(txd->buffer); | 
|  | 1591 | pci_unmap_page(gp->pdev, dma_addr, | 
|  | 1592 | le64_to_cpu(txd->control_word) & | 
|  | 1593 | TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); | 
|  | 1594 |  | 
|  | 1595 | if (frag != skb_shinfo(skb)->nr_frags) | 
|  | 1596 | i++; | 
|  | 1597 | } | 
|  | 1598 | dev_kfree_skb_any(skb); | 
|  | 1599 | } | 
|  | 1600 | } | 
|  | 1601 | } | 
|  | 1602 |  | 
|  | 1603 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 1604 | static void gem_init_rings(struct gem *gp) | 
|  | 1605 | { | 
|  | 1606 | struct gem_init_block *gb = gp->init_block; | 
|  | 1607 | struct net_device *dev = gp->dev; | 
|  | 1608 | int i; | 
|  | 1609 | dma_addr_t dma_addr; | 
|  | 1610 |  | 
|  | 1611 | gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; | 
|  | 1612 |  | 
|  | 1613 | gem_clean_rings(gp); | 
|  | 1614 |  | 
|  | 1615 | gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, | 
|  | 1616 | (unsigned)VLAN_ETH_FRAME_LEN); | 
|  | 1617 |  | 
|  | 1618 | for (i = 0; i < RX_RING_SIZE; i++) { | 
|  | 1619 | struct sk_buff *skb; | 
|  | 1620 | struct gem_rxd *rxd = &gb->rxd[i]; | 
|  | 1621 |  | 
|  | 1622 | skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); | 
|  | 1623 | if (!skb) { | 
|  | 1624 | rxd->buffer = 0; | 
|  | 1625 | rxd->status_word = 0; | 
|  | 1626 | continue; | 
|  | 1627 | } | 
|  | 1628 |  | 
|  | 1629 | gp->rx_skbs[i] = skb; | 
|  | 1630 | skb->dev = dev; | 
|  | 1631 | skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); | 
|  | 1632 | dma_addr = pci_map_page(gp->pdev, | 
|  | 1633 | virt_to_page(skb->data), | 
|  | 1634 | offset_in_page(skb->data), | 
|  | 1635 | RX_BUF_ALLOC_SIZE(gp), | 
|  | 1636 | PCI_DMA_FROMDEVICE); | 
|  | 1637 | rxd->buffer = cpu_to_le64(dma_addr); | 
|  | 1638 | wmb(); | 
|  | 1639 | rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); | 
|  | 1640 | skb_reserve(skb, RX_OFFSET); | 
|  | 1641 | } | 
|  | 1642 |  | 
|  | 1643 | for (i = 0; i < TX_RING_SIZE; i++) { | 
|  | 1644 | struct gem_txd *txd = &gb->txd[i]; | 
|  | 1645 |  | 
|  | 1646 | txd->control_word = 0; | 
|  | 1647 | wmb(); | 
|  | 1648 | txd->buffer = 0; | 
|  | 1649 | } | 
|  | 1650 | wmb(); | 
|  | 1651 | } | 
|  | 1652 |  | 
|  | 1653 | /* Init PHY interface and start link poll state machine */ | 
|  | 1654 | static void gem_init_phy(struct gem *gp) | 
|  | 1655 | { | 
|  | 1656 | u32 mifcfg; | 
|  | 1657 |  | 
|  | 1658 | /* Revert MIF CFG setting done on stop_phy */ | 
|  | 1659 | mifcfg = readl(gp->regs + MIF_CFG); | 
|  | 1660 | mifcfg &= ~MIF_CFG_BBMODE; | 
|  | 1661 | writel(mifcfg, gp->regs + MIF_CFG); | 
|  | 1662 |  | 
|  | 1663 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { | 
|  | 1664 | int i; | 
|  | 1665 |  | 
|  | 1666 | /* These delays suck, but the HW seems to love them. I'll | 
|  | 1667 | * seriously consider breaking some locks here to be able | 
|  | 1668 | * to schedule instead. | 
|  | 1669 | */ | 
|  | 1670 | for (i = 0; i < 3; i++) { | 
|  | 1671 | #ifdef CONFIG_PPC_PMAC | 
|  | 1672 | pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); | 
|  | 1673 | msleep(20); | 
|  | 1674 | #endif | 
|  | 1675 | /* Some PHYs used by Apple have problems getting back to us, | 
|  | 1676 | * so we do an additional reset here. | 
|  | 1677 | */ | 
|  | 1678 | phy_write(gp, MII_BMCR, BMCR_RESET); | 
|  | 1679 | msleep(20); | 
|  | 1680 | if (phy_read(gp, MII_BMCR) != 0xffff) | 
|  | 1681 | break; | 
|  | 1682 | if (i == 2) | 
|  | 1683 | printk(KERN_WARNING "%s: GMAC PHY not responding !\n", | 
|  | 1684 | gp->dev->name); | 
|  | 1685 | } | 
|  | 1686 | } | 
|  | 1687 |  | 
|  | 1688 | if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && | 
|  | 1689 | gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { | 
|  | 1690 | u32 val; | 
|  | 1691 |  | 
|  | 1692 | /* Init datapath mode register. */ | 
|  | 1693 | if (gp->phy_type == phy_mii_mdio0 || | 
|  | 1694 | gp->phy_type == phy_mii_mdio1) { | 
|  | 1695 | val = PCS_DMODE_MGM; | 
|  | 1696 | } else if (gp->phy_type == phy_serialink) { | 
|  | 1697 | val = PCS_DMODE_SM | PCS_DMODE_GMOE; | 
|  | 1698 | } else { | 
|  | 1699 | val = PCS_DMODE_ESM; | 
|  | 1700 | } | 
|  | 1701 |  | 
|  | 1702 | writel(val, gp->regs + PCS_DMODE); | 
|  | 1703 | } | 
|  | 1704 |  | 
|  | 1705 | if (gp->phy_type == phy_mii_mdio0 || | 
|  | 1706 | gp->phy_type == phy_mii_mdio1) { | 
|  | 1707 | // XXX check for errors | 
|  | 1708 | mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); | 
|  | 1709 |  | 
|  | 1710 | /* Init PHY */ | 
|  | 1711 | if (gp->phy_mii.def && gp->phy_mii.def->ops->init) | 
|  | 1712 | gp->phy_mii.def->ops->init(&gp->phy_mii); | 
|  | 1713 | } else { | 
|  | 1714 | u32 val; | 
|  | 1715 | int limit; | 
|  | 1716 |  | 
|  | 1717 | /* Reset PCS unit. */ | 
|  | 1718 | val = readl(gp->regs + PCS_MIICTRL); | 
|  | 1719 | val |= PCS_MIICTRL_RST; | 
|  | 1720 | writel(val, gp->regs + PCS_MIICTRL); | 
|  | 1721 |  | 
|  | 1722 | limit = 32; | 
|  | 1723 | while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { | 
|  | 1724 | udelay(100); | 
|  | 1725 | if (limit-- <= 0) | 
|  | 1726 | break; | 
|  | 1727 | } | 
|  | 1728 | if (limit <= 0) | 
|  | 1729 | printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", | 
|  | 1730 | gp->dev->name); | 
|  | 1731 |  | 
|  | 1732 | /* Make sure PCS is disabled while changing advertisement | 
|  | 1733 | * configuration. | 
|  | 1734 | */ | 
|  | 1735 | val = readl(gp->regs + PCS_CFG); | 
|  | 1736 | val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); | 
|  | 1737 | writel(val, gp->regs + PCS_CFG); | 
|  | 1738 |  | 
|  | 1739 | /* Advertise all capabilities except asymmetric | 
|  | 1740 | * pause. | 
|  | 1741 | */ | 
|  | 1742 | val = readl(gp->regs + PCS_MIIADV); | 
|  | 1743 | val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | | 
|  | 1744 | PCS_MIIADV_SP | PCS_MIIADV_AP); | 
|  | 1745 | writel(val, gp->regs + PCS_MIIADV); | 
|  | 1746 |  | 
|  | 1747 | /* Enable and restart auto-negotiation, disable wrapback/loopback, | 
|  | 1748 | * and re-enable PCS. | 
|  | 1749 | */ | 
|  | 1750 | val = readl(gp->regs + PCS_MIICTRL); | 
|  | 1751 | val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); | 
|  | 1752 | val &= ~PCS_MIICTRL_WB; | 
|  | 1753 | writel(val, gp->regs + PCS_MIICTRL); | 
|  | 1754 |  | 
|  | 1755 | val = readl(gp->regs + PCS_CFG); | 
|  | 1756 | val |= PCS_CFG_ENABLE; | 
|  | 1757 | writel(val, gp->regs + PCS_CFG); | 
|  | 1758 |  | 
|  | 1759 | /* Make sure serialink loopback is off.  The meaning | 
|  | 1760 | * of this bit is logically inverted based upon whether | 
|  | 1761 | * you are in Serialink or SERDES mode. | 
|  | 1762 | */ | 
|  | 1763 | val = readl(gp->regs + PCS_SCTRL); | 
|  | 1764 | if (gp->phy_type == phy_serialink) | 
|  | 1765 | val &= ~PCS_SCTRL_LOOP; | 
|  | 1766 | else | 
|  | 1767 | val |= PCS_SCTRL_LOOP; | 
|  | 1768 | writel(val, gp->regs + PCS_SCTRL); | 
|  | 1769 | } | 
|  | 1770 |  | 
|  | 1771 | /* Default aneg parameters */ | 
|  | 1772 | gp->timer_ticks = 0; | 
|  | 1773 | gp->lstate = link_down; | 
|  | 1774 | netif_carrier_off(gp->dev); | 
|  | 1775 |  | 
|  | 1776 | /* Can I advertise gigabit here ? I'd need BCM PHY docs... */ | 
|  | 1777 | spin_lock_irq(&gp->lock); | 
|  | 1778 | gem_begin_auto_negotiation(gp, NULL); | 
|  | 1779 | spin_unlock_irq(&gp->lock); | 
|  | 1780 | } | 
|  | 1781 |  | 
|  | 1782 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 1783 | static void gem_init_dma(struct gem *gp) | 
|  | 1784 | { | 
|  | 1785 | u64 desc_dma = (u64) gp->gblock_dvma; | 
|  | 1786 | u32 val; | 
|  | 1787 |  | 
|  | 1788 | val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); | 
|  | 1789 | writel(val, gp->regs + TXDMA_CFG); | 
|  | 1790 |  | 
|  | 1791 | writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); | 
|  | 1792 | writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); | 
|  | 1793 | desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); | 
|  | 1794 |  | 
|  | 1795 | writel(0, gp->regs + TXDMA_KICK); | 
|  | 1796 |  | 
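|  |  | /* RX DMA config: RX_OFFSET is the first-byte offset of received data; | 
|  |  | * the (14 / 2) field presumably points the checksum engine past the | 
|  |  | * 14-byte Ethernet header (expressed in 16-bit units), and the FIFO | 
|  |  | * write threshold is set to 128 bytes. | 
|  |  | */ | 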
|  | 1797 | val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | | 
|  | 1798 | ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); | 
|  | 1799 | writel(val, gp->regs + RXDMA_CFG); | 
|  | 1800 |  | 
|  | 1801 | writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); | 
|  | 1802 | writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); | 
|  | 1803 |  | 
|  | 1804 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); | 
|  | 1805 |  | 
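|  |  | /* The pause on/off thresholds appear to be programmed in units of | 
|  |  | * 64 bytes, hence the divide; the byte values themselves come from | 
|  |  | * gem_init_pause_thresholds(). | 
|  |  | */ | 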
|  | 1806 | val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); | 
|  | 1807 | val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); | 
|  | 1808 | writel(val, gp->regs + RXDMA_PTHRESH); | 
|  | 1809 |  | 
|  | 1810 | if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) | 
|  | 1811 | writel(((5 & RXDMA_BLANK_IPKTS) | | 
|  | 1812 | ((8 << 12) & RXDMA_BLANK_ITIME)), | 
|  | 1813 | gp->regs + RXDMA_BLANK); | 
|  | 1814 | else | 
|  | 1815 | writel(((5 & RXDMA_BLANK_IPKTS) | | 
|  | 1816 | ((4 << 12) & RXDMA_BLANK_ITIME)), | 
|  | 1817 | gp->regs + RXDMA_BLANK); | 
|  | 1818 | } | 
|  | 1819 |  | 
|  | 1820 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 1821 | static u32 gem_setup_multicast(struct gem *gp) | 
|  | 1822 | { | 
|  | 1823 | u32 rxcfg = 0; | 
|  | 1824 | int i; | 
|  | 1825 |  | 
|  | 1826 | if ((gp->dev->flags & IFF_ALLMULTI) || | 
|  | 1827 | (gp->dev->mc_count > 256)) { | 
|  | 1828 | for (i=0; i<16; i++) | 
|  | 1829 | writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); | 
|  | 1830 | rxcfg |= MAC_RXCFG_HFE; | 
|  | 1831 | } else if (gp->dev->flags & IFF_PROMISC) { | 
|  | 1832 | rxcfg |= MAC_RXCFG_PROM; | 
|  | 1833 | } else { | 
|  | 1834 | u16 hash_table[16]; | 
|  | 1835 | u32 crc; | 
|  | 1836 | struct dev_mc_list *dmi = gp->dev->mc_list; | 
|  | 1837 | int i; | 
|  | 1838 |  | 
|  | 1839 | for (i = 0; i < 16; i++) | 
|  | 1840 | hash_table[i] = 0; | 
|  | 1841 |  | 
|  | 1842 | for (i = 0; i < gp->dev->mc_count; i++) { | 
|  | 1843 | char *addrs = dmi->dmi_addr; | 
|  | 1844 |  | 
|  | 1845 | dmi = dmi->next; | 
|  | 1846 |  | 
|  | 1847 | if (!(*addrs & 1)) | 
|  | 1848 | continue; | 
|  | 1849 |  | 
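|  |  | /* Hash filter indexing, as implemented here: take the top 8 bits of | 
|  |  | * the little-endian CRC-32 of the address; the high nibble selects | 
|  |  | * one of the 16 hash registers, the low nibble a bit within it | 
|  |  | * (MSB first). | 
|  |  | */ | 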
|  | 1850 | crc = ether_crc_le(6, addrs); | 
|  | 1851 | crc >>= 24; | 
|  | 1852 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | 
|  | 1853 | } | 
|  | 1854 | for (i=0; i<16; i++) | 
|  | 1855 | writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); | 
|  | 1856 | rxcfg |= MAC_RXCFG_HFE; | 
|  | 1857 | } | 
|  | 1858 |  | 
|  | 1859 | return rxcfg; | 
|  | 1860 | } | 
|  | 1861 |  | 
|  | 1862 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 1863 | static void gem_init_mac(struct gem *gp) | 
|  | 1864 | { | 
|  | 1865 | unsigned char *e = &gp->dev->dev_addr[0]; | 
|  | 1866 |  | 
|  | 1867 | writel(0x1bf0, gp->regs + MAC_SNDPAUSE); | 
|  | 1868 |  | 
|  | 1869 | writel(0x00, gp->regs + MAC_IPG0); | 
|  | 1870 | writel(0x08, gp->regs + MAC_IPG1); | 
|  | 1871 | writel(0x04, gp->regs + MAC_IPG2); | 
|  | 1872 | writel(0x40, gp->regs + MAC_STIME); | 
|  | 1873 | writel(0x40, gp->regs + MAC_MINFSZ); | 
|  | 1874 |  | 
|  | 1875 | /* Ethernet payload + header + FCS + optional VLAN tag. */ | 
|  | 1876 | writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); | 
|  | 1877 |  | 
|  | 1878 | writel(0x07, gp->regs + MAC_PASIZE); | 
|  | 1879 | writel(0x04, gp->regs + MAC_JAMSIZE); | 
|  | 1880 | writel(0x10, gp->regs + MAC_ATTLIM); | 
|  | 1881 | writel(0x8808, gp->regs + MAC_MCTYPE); | 
|  | 1882 |  | 
|  | 1883 | writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); | 
|  | 1884 |  | 
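|  |  | /* The station address is split across three 16-bit registers, with | 
|  |  | * the low-order bytes (e[4], e[5]) going into MAC_ADDR0. | 
|  |  | */ | 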
|  | 1885 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); | 
|  | 1886 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); | 
|  | 1887 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); | 
|  | 1888 |  | 
|  | 1889 | writel(0, gp->regs + MAC_ADDR3); | 
|  | 1890 | writel(0, gp->regs + MAC_ADDR4); | 
|  | 1891 | writel(0, gp->regs + MAC_ADDR5); | 
|  | 1892 |  | 
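|  |  | /* MAC_ADDR6..8 appear to hold the 802.3x PAUSE multicast address | 
|  |  | * 01:80:c2:00:00:01 in the same 16-bit split as above. | 
|  |  | */ | 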
|  | 1893 | writel(0x0001, gp->regs + MAC_ADDR6); | 
|  | 1894 | writel(0xc200, gp->regs + MAC_ADDR7); | 
|  | 1895 | writel(0x0180, gp->regs + MAC_ADDR8); | 
|  | 1896 |  | 
|  | 1897 | writel(0, gp->regs + MAC_AFILT0); | 
|  | 1898 | writel(0, gp->regs + MAC_AFILT1); | 
|  | 1899 | writel(0, gp->regs + MAC_AFILT2); | 
|  | 1900 | writel(0, gp->regs + MAC_AF21MSK); | 
|  | 1901 | writel(0, gp->regs + MAC_AF0MSK); | 
|  | 1902 |  | 
|  | 1903 | gp->mac_rx_cfg = gem_setup_multicast(gp); | 
|  | 1904 | #ifdef STRIP_FCS | 
|  | 1905 | gp->mac_rx_cfg |= MAC_RXCFG_SFCS; | 
|  | 1906 | #endif | 
|  | 1907 | writel(0, gp->regs + MAC_NCOLL); | 
|  | 1908 | writel(0, gp->regs + MAC_FASUCC); | 
|  | 1909 | writel(0, gp->regs + MAC_ECOLL); | 
|  | 1910 | writel(0, gp->regs + MAC_LCOLL); | 
|  | 1911 | writel(0, gp->regs + MAC_DTIMER); | 
|  | 1912 | writel(0, gp->regs + MAC_PATMPS); | 
|  | 1913 | writel(0, gp->regs + MAC_RFCTR); | 
|  | 1914 | writel(0, gp->regs + MAC_LERR); | 
|  | 1915 | writel(0, gp->regs + MAC_AERR); | 
|  | 1916 | writel(0, gp->regs + MAC_FCSERR); | 
|  | 1917 | writel(0, gp->regs + MAC_RXCVERR); | 
|  | 1918 |  | 
|  | 1919 | /* Clear RX/TX/MAC/XIF config, we will set these up and enable | 
|  | 1920 | * them once a link is established. | 
|  | 1921 | */ | 
|  | 1922 | writel(0, gp->regs + MAC_TXCFG); | 
|  | 1923 | writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); | 
|  | 1924 | writel(0, gp->regs + MAC_MCCFG); | 
|  | 1925 | writel(0, gp->regs + MAC_XIFCFG); | 
|  | 1926 |  | 
|  | 1927 | /* Setup MAC interrupts.  We want to get all of the interesting | 
|  | 1928 | * counter expiration events, but we do not want to hear about | 
|  | 1929 | * normal rx/tx as the DMA engine tells us that. | 
|  | 1930 | */ | 
|  | 1931 | writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); | 
|  | 1932 | writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); | 
|  | 1933 |  | 
|  | 1934 | /* Don't enable even the PAUSE interrupts for now, we | 
|  | 1935 | * make no use of those events other than to record them. | 
|  | 1936 | */ | 
|  | 1937 | writel(0xffffffff, gp->regs + MAC_MCMASK); | 
|  | 1938 |  | 
|  | 1939 | /* Don't enable GEM's WOL in normal operations | 
|  | 1940 | */ | 
|  | 1941 | if (gp->has_wol) | 
|  | 1942 | writel(0, gp->regs + WOL_WAKECSR); | 
|  | 1943 | } | 
|  | 1944 |  | 
|  | 1945 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 1946 | static void gem_init_pause_thresholds(struct gem *gp) | 
|  | 1947 | { | 
|  | 1948 | u32 cfg; | 
|  | 1949 |  | 
|  | 1950 | /* Calculate pause thresholds.  Setting the OFF threshold to the | 
|  | 1951 | * full RX FIFO size effectively disables PAUSE generation, which | 
|  | 1952 | * is what we do for 10/100-only GEMs whose FIFOs are too small | 
|  | 1953 | * to make real gains from PAUSE. | 
|  | 1954 | */ | 
|  | 1955 | if (gp->rx_fifo_sz <= (2 * 1024)) { | 
|  | 1956 | gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; | 
|  | 1957 | } else { | 
|  | 1958 | int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; | 
|  | 1959 | int off = (gp->rx_fifo_sz - (max_frame * 2)); | 
|  | 1960 | int on = off - max_frame; | 
|  | 1961 |  | 
|  | 1962 | gp->rx_pause_off = off; | 
|  | 1963 | gp->rx_pause_on = on; | 
|  | 1964 | } | 
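|  |  | /* Illustrative numbers only: with the standard 1500-byte MTU, | 
|  |  | * rx_buf_sz is 1518, so max_frame = (1518 + 4 + 64) & ~63 = 1536. | 
|  |  | * On a GEM with a 20KB RX FIFO that gives off = 20480 - 3072 = 17408 | 
|  |  | * and on = 17408 - 1536 = 15872 bytes. | 
|  |  | */ | 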
|  | 1965 |  | 
|  | 1966 |  | 
|  | 1967 | /* Configure the chip "burst" DMA mode & enable some | 
|  | 1968 | * HW bug fixes on the Apple version | 
|  | 1969 | */ | 
|  | 1970 | cfg  = 0; | 
|  | 1971 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) | 
|  | 1972 | cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; | 
|  | 1973 | #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) | 
|  | 1974 | cfg |= GREG_CFG_IBURST; | 
|  | 1975 | #endif | 
|  | 1976 | cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); | 
|  | 1977 | cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); | 
|  | 1978 | writel(cfg, gp->regs + GREG_CFG); | 
|  | 1979 |  | 
|  | 1980 | /* If Infinite Burst didn't stick, then use different | 
|  | 1981 | * thresholds (and Apple bug fixes don't exist) | 
|  | 1982 | */ | 
|  | 1983 | if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { | 
|  | 1984 | cfg = ((2 << 1) & GREG_CFG_TXDMALIM); | 
|  | 1985 | cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); | 
|  | 1986 | writel(cfg, gp->regs + GREG_CFG); | 
|  | 1987 | } | 
|  | 1988 | } | 
|  | 1989 |  | 
|  | 1990 | static int gem_check_invariants(struct gem *gp) | 
|  | 1991 | { | 
|  | 1992 | struct pci_dev *pdev = gp->pdev; | 
|  | 1993 | u32 mif_cfg; | 
|  | 1994 |  | 
|  | 1995 | /* On Apple's sungem, we can't rely on registers as the chip | 
|  | 1996 | * has been powered down by the firmware. The PHY is looked | 
|  | 1997 | * up later on. | 
|  | 1998 | */ | 
|  | 1999 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) { | 
|  | 2000 | gp->phy_type = phy_mii_mdio0; | 
|  | 2001 | gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; | 
|  | 2002 | gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; | 
|  | 2003 | gp->swrst_base = 0; | 
|  | 2004 |  | 
|  | 2005 | mif_cfg = readl(gp->regs + MIF_CFG); | 
|  | 2006 | mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); | 
|  | 2007 | mif_cfg |= MIF_CFG_MDI0; | 
|  | 2008 | writel(mif_cfg, gp->regs + MIF_CFG); | 
|  | 2009 | writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); | 
|  | 2010 | writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); | 
|  | 2011 |  | 
|  | 2012 | /* We hard-code the PHY address so we can properly bring it out of | 
|  | 2013 | * reset later on. We can't really probe it at this point, though | 
|  | 2014 | * that isn't an issue. | 
|  | 2015 | */ | 
|  | 2016 | if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) | 
|  | 2017 | gp->mii_phy_addr = 1; | 
|  | 2018 | else | 
|  | 2019 | gp->mii_phy_addr = 0; | 
|  | 2020 |  | 
|  | 2021 | return 0; | 
|  | 2022 | } | 
|  | 2023 |  | 
|  | 2024 | mif_cfg = readl(gp->regs + MIF_CFG); | 
|  | 2025 |  | 
|  | 2026 | if (pdev->vendor == PCI_VENDOR_ID_SUN && | 
|  | 2027 | pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { | 
|  | 2028 | /* One of the MII PHYs _must_ be present | 
|  | 2029 | * as this chip has no gigabit PHY. | 
|  | 2030 | */ | 
|  | 2031 | if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { | 
|  | 2032 | printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", | 
|  | 2033 | mif_cfg); | 
|  | 2034 | return -1; | 
|  | 2035 | } | 
|  | 2036 | } | 
|  | 2037 |  | 
|  | 2038 | /* Determine initial PHY interface type guess.  MDIO1 is the | 
|  | 2039 | * external PHY and thus takes precedence over MDIO0. | 
|  | 2040 | */ | 
|  | 2041 |  | 
|  | 2042 | if (mif_cfg & MIF_CFG_MDI1) { | 
|  | 2043 | gp->phy_type = phy_mii_mdio1; | 
|  | 2044 | mif_cfg |= MIF_CFG_PSELECT; | 
|  | 2045 | writel(mif_cfg, gp->regs + MIF_CFG); | 
|  | 2046 | } else if (mif_cfg & MIF_CFG_MDI0) { | 
|  | 2047 | gp->phy_type = phy_mii_mdio0; | 
|  | 2048 | mif_cfg &= ~MIF_CFG_PSELECT; | 
|  | 2049 | writel(mif_cfg, gp->regs + MIF_CFG); | 
|  | 2050 | } else { | 
|  | 2051 | gp->phy_type = phy_serialink; | 
|  | 2052 | } | 
|  | 2053 | if (gp->phy_type == phy_mii_mdio1 || | 
|  | 2054 | gp->phy_type == phy_mii_mdio0) { | 
|  | 2055 | int i; | 
|  | 2056 |  | 
|  | 2057 | for (i = 0; i < 32; i++) { | 
|  | 2058 | gp->mii_phy_addr = i; | 
|  | 2059 | if (phy_read(gp, MII_BMCR) != 0xffff) | 
|  | 2060 | break; | 
|  | 2061 | } | 
|  | 2062 | if (i == 32) { | 
|  | 2063 | if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { | 
|  | 2064 | printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); | 
|  | 2065 | return -1; | 
|  | 2066 | } | 
|  | 2067 | gp->phy_type = phy_serdes; | 
|  | 2068 | } | 
|  | 2069 | } | 
|  | 2070 |  | 
|  | 2071 | /* Fetch the FIFO configurations now too. */ | 
|  | 2072 | gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; | 
|  | 2073 | gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; | 
|  | 2074 |  | 
|  | 2075 | if (pdev->vendor == PCI_VENDOR_ID_SUN) { | 
|  | 2076 | if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { | 
|  | 2077 | if (gp->tx_fifo_sz != (9 * 1024) || | 
|  | 2078 | gp->rx_fifo_sz != (20 * 1024)) { | 
|  | 2079 | printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", | 
|  | 2080 | gp->tx_fifo_sz, gp->rx_fifo_sz); | 
|  | 2081 | return -1; | 
|  | 2082 | } | 
|  | 2083 | gp->swrst_base = 0; | 
|  | 2084 | } else { | 
|  | 2085 | if (gp->tx_fifo_sz != (2 * 1024) || | 
|  | 2086 | gp->rx_fifo_sz != (2 * 1024)) { | 
|  | 2087 | printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", | 
|  | 2088 | gp->tx_fifo_sz, gp->rx_fifo_sz); | 
|  | 2089 | return -1; | 
|  | 2090 | } | 
|  | 2091 | gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; | 
|  | 2092 | } | 
|  | 2093 | } | 
|  | 2094 |  | 
|  | 2095 | return 0; | 
|  | 2096 | } | 
|  | 2097 |  | 
|  | 2098 | /* Must be invoked under gp->lock and gp->tx_lock. */ | 
|  | 2099 | static void gem_reinit_chip(struct gem *gp) | 
|  | 2100 | { | 
|  | 2101 | /* Reset the chip */ | 
|  | 2102 | gem_reset(gp); | 
|  | 2103 |  | 
|  | 2104 | /* Make sure ints are disabled */ | 
|  | 2105 | gem_disable_ints(gp); | 
|  | 2106 |  | 
|  | 2107 | /* Allocate & setup ring buffers */ | 
|  | 2108 | gem_init_rings(gp); | 
|  | 2109 |  | 
|  | 2110 | /* Configure pause thresholds */ | 
|  | 2111 | gem_init_pause_thresholds(gp); | 
|  | 2112 |  | 
|  | 2113 | /* Init DMA & MAC engines */ | 
|  | 2114 | gem_init_dma(gp); | 
|  | 2115 | gem_init_mac(gp); | 
|  | 2116 | } | 
|  | 2117 |  | 
|  | 2118 |  | 
|  | 2119 | /* Must be invoked with no lock held. */ | 
|  | 2120 | static void gem_stop_phy(struct gem *gp, int wol) | 
|  | 2121 | { | 
|  | 2122 | u32 mifcfg; | 
|  | 2123 | unsigned long flags; | 
|  | 2124 |  | 
|  | 2125 | /* Let the chip settle down a bit; that seems to help | 
|  | 2126 | * sleep mode on some models. | 
|  | 2127 | */ | 
|  | 2128 | msleep(10); | 
|  | 2129 |  | 
|  | 2130 | /* Make sure we aren't polling for PHY status changes. We | 
|  | 2131 | * don't currently use that feature anyway. | 
|  | 2132 | */ | 
|  | 2133 | mifcfg = readl(gp->regs + MIF_CFG); | 
|  | 2134 | mifcfg &= ~MIF_CFG_POLL; | 
|  | 2135 | writel(mifcfg, gp->regs + MIF_CFG); | 
|  | 2136 |  | 
|  | 2137 | if (wol && gp->has_wol) { | 
|  | 2138 | unsigned char *e = &gp->dev->dev_addr[0]; | 
|  | 2139 | u32 csr; | 
|  | 2140 |  | 
|  | 2141 | /* Setup wake-on-lan for MAGIC packet */ | 
|  | 2142 | writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, | 
|  | 2143 | gp->regs + MAC_RXCFG); | 
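|  |  | /* The WOL engine matches the station address split into three | 
|  |  | * 16-bit registers, using the same layout as MAC_ADDR0..2. | 
|  |  | */ | 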
|  | 2144 | writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); | 
|  | 2145 | writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); | 
|  | 2146 | writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); | 
|  | 2147 |  | 
|  | 2148 | writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); | 
|  | 2149 | csr = WOL_WAKECSR_ENABLE; | 
|  | 2150 | if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) | 
|  | 2151 | csr |= WOL_WAKECSR_MII; | 
|  | 2152 | writel(csr, gp->regs + WOL_WAKECSR); | 
|  | 2153 | } else { | 
|  | 2154 | writel(0, gp->regs + MAC_RXCFG); | 
|  | 2155 | (void)readl(gp->regs + MAC_RXCFG); | 
|  | 2156 | /* Machine sleep will die in strange ways if we | 
|  | 2157 | * don't wait a bit here; it looks like the chip takes | 
|  | 2158 | * some time to really shut down. | 
|  | 2159 | */ | 
|  | 2160 | msleep(10); | 
|  | 2161 | } | 
|  | 2162 |  | 
|  | 2163 | writel(0, gp->regs + MAC_TXCFG); | 
|  | 2164 | writel(0, gp->regs + MAC_XIFCFG); | 
|  | 2165 | writel(0, gp->regs + TXDMA_CFG); | 
|  | 2166 | writel(0, gp->regs + RXDMA_CFG); | 
|  | 2167 |  | 
|  | 2168 | if (!wol) { | 
|  | 2169 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2170 | spin_lock(&gp->tx_lock); | 
|  | 2171 | gem_reset(gp); | 
|  | 2172 | writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); | 
|  | 2173 | writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); | 
|  | 2174 | spin_unlock(&gp->tx_lock); | 
|  | 2175 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2176 |  | 
|  | 2177 | /* No need to take the lock here */ | 
|  | 2178 |  | 
|  | 2179 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) | 
|  | 2180 | gp->phy_mii.def->ops->suspend(&gp->phy_mii); | 
|  | 2181 |  | 
|  | 2182 | /* According to Apple, we must set the MDIO pins to this benign | 
|  | 2183 | * state or we may 1) eat more current, 2) damage some PHYs. | 
|  | 2184 | */ | 
|  | 2185 | writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); | 
|  | 2186 | writel(0, gp->regs + MIF_BBCLK); | 
|  | 2187 | writel(0, gp->regs + MIF_BBDATA); | 
|  | 2188 | writel(0, gp->regs + MIF_BBOENAB); | 
|  | 2189 | writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); | 
|  | 2190 | (void) readl(gp->regs + MAC_XIFCFG); | 
|  | 2191 | } | 
|  | 2192 | } | 
|  | 2193 |  | 
|  | 2194 |  | 
|  | 2195 | static int gem_do_start(struct net_device *dev) | 
|  | 2196 | { | 
|  | 2197 | struct gem *gp = dev->priv; | 
|  | 2198 | unsigned long flags; | 
|  | 2199 |  | 
|  | 2200 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2201 | spin_lock(&gp->tx_lock); | 
|  | 2202 |  | 
|  | 2203 | /* Enable the cell */ | 
|  | 2204 | gem_get_cell(gp); | 
|  | 2205 |  | 
|  | 2206 | /* Init & setup chip hardware */ | 
|  | 2207 | gem_reinit_chip(gp); | 
|  | 2208 |  | 
|  | 2209 | gp->running = 1; | 
|  | 2210 |  | 
|  | 2211 | if (gp->lstate == link_up) { | 
|  | 2212 | netif_carrier_on(gp->dev); | 
|  | 2213 | gem_set_link_modes(gp); | 
|  | 2214 | } | 
|  | 2215 |  | 
|  | 2216 | netif_wake_queue(gp->dev); | 
|  | 2217 |  | 
|  | 2218 | spin_unlock(&gp->tx_lock); | 
|  | 2219 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2220 |  | 
|  | 2221 | if (request_irq(gp->pdev->irq, gem_interrupt, | 
|  | 2222 | SA_SHIRQ, dev->name, (void *)dev)) { | 
|  | 2223 | printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); | 
|  | 2224 |  | 
|  | 2225 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2226 | spin_lock(&gp->tx_lock); | 
|  | 2227 |  | 
|  | 2228 | gp->running =  0; | 
|  | 2229 | gem_reset(gp); | 
|  | 2230 | gem_clean_rings(gp); | 
|  | 2231 | gem_put_cell(gp); | 
|  | 2232 |  | 
|  | 2233 | spin_unlock(&gp->tx_lock); | 
|  | 2234 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2235 |  | 
|  | 2236 | return -EAGAIN; | 
|  | 2237 | } | 
|  | 2238 |  | 
|  | 2239 | return 0; | 
|  | 2240 | } | 
|  | 2241 |  | 
|  | 2242 | static void gem_do_stop(struct net_device *dev, int wol) | 
|  | 2243 | { | 
|  | 2244 | struct gem *gp = dev->priv; | 
|  | 2245 | unsigned long flags; | 
|  | 2246 |  | 
|  | 2247 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2248 | spin_lock(&gp->tx_lock); | 
|  | 2249 |  | 
|  | 2250 | gp->running = 0; | 
|  | 2251 |  | 
|  | 2252 | /* Stop netif queue */ | 
|  | 2253 | netif_stop_queue(dev); | 
|  | 2254 |  | 
|  | 2255 | /* Make sure ints are disabled */ | 
|  | 2256 | gem_disable_ints(gp); | 
|  | 2257 |  | 
|  | 2258 | /* We can drop the lock now */ | 
|  | 2259 | spin_unlock(&gp->tx_lock); | 
|  | 2260 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2261 |  | 
|  | 2262 | /* Stop the DMA engines; skip the full reset if we are going to sleep with WOL */ | 
|  | 2263 | gem_stop_dma(gp); | 
|  | 2264 | msleep(10); | 
|  | 2265 | if (!wol) | 
|  | 2266 | gem_reset(gp); | 
|  | 2267 | msleep(10); | 
|  | 2268 |  | 
|  | 2269 | /* Get rid of rings */ | 
|  | 2270 | gem_clean_rings(gp); | 
|  | 2271 |  | 
|  | 2272 | /* No irq needed anymore */ | 
|  | 2273 | free_irq(gp->pdev->irq, (void *) dev); | 
|  | 2274 |  | 
|  | 2275 | /* The cell isn't needed either if WOL is off */ | 
|  | 2276 | if (!wol) { | 
|  | 2277 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2278 | gem_put_cell(gp); | 
|  | 2279 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2280 | } | 
|  | 2281 | } | 
|  | 2282 |  | 
|  | 2283 | static void gem_reset_task(void *data) | 
|  | 2284 | { | 
|  | 2285 | struct gem *gp = (struct gem *) data; | 
|  | 2286 |  | 
|  | 2287 | down(&gp->pm_sem); | 
|  | 2288 |  | 
|  | 2289 | netif_poll_disable(gp->dev); | 
|  | 2290 |  | 
|  | 2291 | spin_lock_irq(&gp->lock); | 
|  | 2292 | spin_lock(&gp->tx_lock); | 
|  | 2293 |  | 
|  | 2294 | if (gp->running == 0) | 
|  | 2295 | goto not_running; | 
|  | 2296 |  | 
|  | 2297 | if (gp->running) { | 
|  | 2298 | netif_stop_queue(gp->dev); | 
|  | 2299 |  | 
|  | 2300 | /* Reset the chip & rings */ | 
|  | 2301 | gem_reinit_chip(gp); | 
|  | 2302 | if (gp->lstate == link_up) | 
|  | 2303 | gem_set_link_modes(gp); | 
|  | 2304 | netif_wake_queue(gp->dev); | 
|  | 2305 | } | 
|  | 2306 | not_running: | 
|  | 2307 | gp->reset_task_pending = 0; | 
|  | 2308 |  | 
|  | 2309 | spin_unlock(&gp->tx_lock); | 
|  | 2310 | spin_unlock_irq(&gp->lock); | 
|  | 2311 |  | 
|  | 2312 | netif_poll_enable(gp->dev); | 
|  | 2313 |  | 
|  | 2314 | up(&gp->pm_sem); | 
|  | 2315 | } | 
|  | 2316 |  | 
|  | 2317 |  | 
|  | 2318 | static int gem_open(struct net_device *dev) | 
|  | 2319 | { | 
|  | 2320 | struct gem *gp = dev->priv; | 
|  | 2321 | int rc = 0; | 
|  | 2322 |  | 
|  | 2323 | down(&gp->pm_sem); | 
|  | 2324 |  | 
|  | 2325 | /* We need the cell enabled */ | 
|  | 2326 | if (!gp->asleep) | 
|  | 2327 | rc = gem_do_start(dev); | 
|  | 2328 | gp->opened = (rc == 0); | 
|  | 2329 |  | 
|  | 2330 | up(&gp->pm_sem); | 
|  | 2331 |  | 
|  | 2332 | return rc; | 
|  | 2333 | } | 
|  | 2334 |  | 
|  | 2335 | static int gem_close(struct net_device *dev) | 
|  | 2336 | { | 
|  | 2337 | struct gem *gp = dev->priv; | 
|  | 2338 |  | 
|  | 2339 | /* Note: we don't need to call netif_poll_disable() here because | 
|  | 2340 | * our caller (dev_close) already did it for us | 
|  | 2341 | */ | 
|  | 2342 |  | 
|  | 2343 | down(&gp->pm_sem); | 
|  | 2344 |  | 
|  | 2345 | gp->opened = 0; | 
|  | 2346 | if (!gp->asleep) | 
|  | 2347 | gem_do_stop(dev, 0); | 
|  | 2348 |  | 
|  | 2349 | up(&gp->pm_sem); | 
|  | 2350 |  | 
|  | 2351 | return 0; | 
|  | 2352 | } | 
|  | 2353 |  | 
|  | 2354 | #ifdef CONFIG_PM | 
|  | 2355 | static int gem_suspend(struct pci_dev *pdev, pm_message_t state) | 
|  | 2356 | { | 
|  | 2357 | struct net_device *dev = pci_get_drvdata(pdev); | 
|  | 2358 | struct gem *gp = dev->priv; | 
|  | 2359 | unsigned long flags; | 
|  | 2360 |  | 
|  | 2361 | down(&gp->pm_sem); | 
|  | 2362 |  | 
|  | 2363 | netif_poll_disable(dev); | 
|  | 2364 |  | 
|  | 2365 | printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", | 
|  | 2366 | dev->name, | 
|  | 2367 | (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); | 
|  | 2368 |  | 
|  | 2369 | /* Keep the cell enabled during the entire operation */ | 
|  | 2370 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2371 | spin_lock(&gp->tx_lock); | 
|  | 2372 | gem_get_cell(gp); | 
|  | 2373 | spin_unlock(&gp->tx_lock); | 
|  | 2374 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2375 |  | 
|  | 2376 | /* If the driver is opened, we stop the MAC */ | 
|  | 2377 | if (gp->opened) { | 
|  | 2378 | /* Stop traffic, mark us closed */ | 
|  | 2379 | netif_device_detach(dev); | 
|  | 2380 |  | 
|  | 2381 | /* Switch off MAC, remember WOL setting */ | 
|  | 2382 | gp->asleep_wol = gp->wake_on_lan; | 
|  | 2383 | gem_do_stop(dev, gp->asleep_wol); | 
|  | 2384 | } else | 
|  | 2385 | gp->asleep_wol = 0; | 
|  | 2386 |  | 
|  | 2387 | /* Mark us asleep */ | 
|  | 2388 | gp->asleep = 1; | 
|  | 2389 | wmb(); | 
|  | 2390 |  | 
|  | 2391 | /* Stop the link timer */ | 
|  | 2392 | del_timer_sync(&gp->link_timer); | 
|  | 2393 |  | 
|  | 2394 | /* Now we release the semaphore so we don't block the reset task, | 
|  | 2395 | * which may take it too. We are marked asleep, so there will be no | 
|  | 2396 | * conflict here. | 
|  | 2397 | */ | 
|  | 2398 | up(&gp->pm_sem); | 
|  | 2399 |  | 
|  | 2400 | /* Wait for a pending reset task to complete */ | 
|  | 2401 | while (gp->reset_task_pending) | 
|  | 2402 | yield(); | 
|  | 2403 | flush_scheduled_work(); | 
|  | 2404 |  | 
|  | 2405 | /* Shut the PHY down eventually and setup WOL */ | 
|  | 2406 | gem_stop_phy(gp, gp->asleep_wol); | 
|  | 2407 |  | 
|  | 2408 | /* Make sure bus master is disabled */ | 
|  | 2409 | pci_disable_device(gp->pdev); | 
|  | 2410 |  | 
|  | 2411 | /* Release the cell, no need to take a lock at this point since | 
|  | 2412 | * nothing else can happen now | 
|  | 2413 | */ | 
|  | 2414 | gem_put_cell(gp); | 
|  | 2415 |  | 
|  | 2416 | return 0; | 
|  | 2417 | } | 
|  | 2418 |  | 
|  | 2419 | static int gem_resume(struct pci_dev *pdev) | 
|  | 2420 | { | 
|  | 2421 | struct net_device *dev = pci_get_drvdata(pdev); | 
|  | 2422 | struct gem *gp = dev->priv; | 
|  | 2423 | unsigned long flags; | 
|  | 2424 |  | 
|  | 2425 | printk(KERN_INFO "%s: resuming\n", dev->name); | 
|  | 2426 |  | 
|  | 2427 | down(&gp->pm_sem); | 
|  | 2428 |  | 
|  | 2429 | /* Keep the cell enabled during the entire operation; no need to | 
|  | 2430 | * take a lock here though, since nothing else can happen while we are | 
|  | 2431 | * marked asleep. | 
|  | 2432 | */ | 
|  | 2433 | gem_get_cell(gp); | 
|  | 2434 |  | 
|  | 2435 | /* Make sure PCI access and bus master are enabled */ | 
|  | 2436 | if (pci_enable_device(gp->pdev)) { | 
|  | 2437 | printk(KERN_ERR "%s: Can't re-enable chip !\n", | 
|  | 2438 | dev->name); | 
|  | 2439 | /* Put the cell and forget it for now; it will be considered | 
|  | 2440 | * still asleep, and a new sleep cycle may bring it back. | 
|  | 2441 | */ | 
|  | 2442 | gem_put_cell(gp); | 
|  | 2443 | up(&gp->pm_sem); | 
|  | 2444 | return 0; | 
|  | 2445 | } | 
|  | 2446 | pci_set_master(gp->pdev); | 
|  | 2447 |  | 
|  | 2448 | /* Reset everything */ | 
|  | 2449 | gem_reset(gp); | 
|  | 2450 |  | 
|  | 2451 | /* Mark us woken up */ | 
|  | 2452 | gp->asleep = 0; | 
|  | 2453 | wmb(); | 
|  | 2454 |  | 
|  | 2455 | /* Bring the PHY back. Again, the lock is useless at this point since | 
|  | 2456 | * nothing can happen until we restart the whole thing. | 
|  | 2457 | */ | 
|  | 2458 | gem_init_phy(gp); | 
|  | 2459 |  | 
|  | 2460 | /* If we were opened, bring everything back */ | 
|  | 2461 | if (gp->opened) { | 
|  | 2462 | /* Restart MAC */ | 
|  | 2463 | gem_do_start(dev); | 
|  | 2464 |  | 
|  | 2465 | /* Re-attach net device */ | 
|  | 2466 | netif_device_attach(dev); | 
|  | 2467 |  | 
|  | 2468 | } | 
|  | 2469 |  | 
|  | 2470 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2471 | spin_lock(&gp->tx_lock); | 
|  | 2472 |  | 
|  | 2473 | /* If we had WOL enabled, the cell clock was never turned off during | 
|  | 2474 | * sleep, so we end up being unbalanced. Fix that here. | 
|  | 2475 | */ | 
|  | 2476 | if (gp->asleep_wol) | 
|  | 2477 | gem_put_cell(gp); | 
|  | 2478 |  | 
|  | 2479 | /* This function doesn't need to hold the cell; it will be held by | 
|  | 2480 | * gem_do_start() if the driver is open. | 
|  | 2481 | */ | 
|  | 2482 | gem_put_cell(gp); | 
|  | 2483 |  | 
|  | 2484 | spin_unlock(&gp->tx_lock); | 
|  | 2485 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2486 |  | 
|  | 2487 | netif_poll_enable(dev); | 
|  | 2488 |  | 
|  | 2489 | up(&gp->pm_sem); | 
|  | 2490 |  | 
|  | 2491 | return 0; | 
|  | 2492 | } | 
|  | 2493 | #endif /* CONFIG_PM */ | 
|  | 2494 |  | 
|  | 2495 | static struct net_device_stats *gem_get_stats(struct net_device *dev) | 
|  | 2496 | { | 
|  | 2497 | struct gem *gp = dev->priv; | 
|  | 2498 | struct net_device_stats *stats = &gp->net_stats; | 
|  | 2499 |  | 
|  | 2500 | spin_lock_irq(&gp->lock); | 
|  | 2501 | spin_lock(&gp->tx_lock); | 
|  | 2502 |  | 
|  | 2503 | /* I have seen this being called while the PM was in progress, | 
|  | 2504 | * so we shield against this | 
|  | 2505 | */ | 
|  | 2506 | if (gp->running) { | 
|  | 2507 | stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); | 
|  | 2508 | writel(0, gp->regs + MAC_FCSERR); | 
|  | 2509 |  | 
|  | 2510 | stats->rx_frame_errors += readl(gp->regs + MAC_AERR); | 
|  | 2511 | writel(0, gp->regs + MAC_AERR); | 
|  | 2512 |  | 
|  | 2513 | stats->rx_length_errors += readl(gp->regs + MAC_LERR); | 
|  | 2514 | writel(0, gp->regs + MAC_LERR); | 
|  | 2515 |  | 
|  | 2516 | stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); | 
|  | 2517 | stats->collisions += | 
|  | 2518 | (readl(gp->regs + MAC_ECOLL) + | 
|  | 2519 | readl(gp->regs + MAC_LCOLL)); | 
|  | 2520 | writel(0, gp->regs + MAC_ECOLL); | 
|  | 2521 | writel(0, gp->regs + MAC_LCOLL); | 
|  | 2522 | } | 
|  | 2523 |  | 
|  | 2524 | spin_unlock(&gp->tx_lock); | 
|  | 2525 | spin_unlock_irq(&gp->lock); | 
|  | 2526 |  | 
|  | 2527 | return &gp->net_stats; | 
|  | 2528 | } | 
|  | 2529 |  | 
|  | 2530 | static void gem_set_multicast(struct net_device *dev) | 
|  | 2531 | { | 
|  | 2532 | struct gem *gp = dev->priv; | 
|  | 2533 | u32 rxcfg, rxcfg_new; | 
|  | 2534 | int limit = 10000; | 
|  | 2535 |  | 
|  | 2536 |  | 
|  | 2537 | spin_lock_irq(&gp->lock); | 
|  | 2538 | spin_lock(&gp->tx_lock); | 
|  | 2539 |  | 
|  | 2540 | if (!gp->running) | 
|  | 2541 | goto bail; | 
|  | 2542 |  | 
|  | 2543 | netif_stop_queue(dev); | 
|  | 2544 |  | 
|  | 2545 | rxcfg = readl(gp->regs + MAC_RXCFG); | 
|  | 2546 | rxcfg_new = gem_setup_multicast(gp); | 
|  | 2547 | #ifdef STRIP_FCS | 
|  | 2548 | rxcfg_new |= MAC_RXCFG_SFCS; | 
|  | 2549 | #endif | 
|  | 2550 | gp->mac_rx_cfg = rxcfg_new; | 
|  | 2551 |  | 
|  | 2552 | writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); | 
|  | 2553 | while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { | 
|  | 2554 | if (!limit--) | 
|  | 2555 | break; | 
|  | 2556 | udelay(10); | 
|  | 2557 | } | 
|  | 2558 |  | 
|  | 2559 | rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); | 
|  | 2560 | rxcfg |= rxcfg_new; | 
|  | 2561 |  | 
|  | 2562 | writel(rxcfg, gp->regs + MAC_RXCFG); | 
|  | 2563 |  | 
|  | 2564 | netif_wake_queue(dev); | 
|  | 2565 |  | 
|  | 2566 | bail: | 
|  | 2567 | spin_unlock(&gp->tx_lock); | 
|  | 2568 | spin_unlock_irq(&gp->lock); | 
|  | 2569 | } | 
|  | 2570 |  | 
|  | 2571 | /* Jumbo-grams don't seem to work :-( */ | 
|  | 2572 | #define GEM_MIN_MTU	68 | 
|  | 2573 | #if 1 | 
|  | 2574 | #define GEM_MAX_MTU	1500 | 
|  | 2575 | #else | 
|  | 2576 | #define GEM_MAX_MTU	9000 | 
|  | 2577 | #endif | 
|  | 2578 |  | 
|  | 2579 | static int gem_change_mtu(struct net_device *dev, int new_mtu) | 
|  | 2580 | { | 
|  | 2581 | struct gem *gp = dev->priv; | 
|  | 2582 |  | 
|  | 2583 | if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) | 
|  | 2584 | return -EINVAL; | 
|  | 2585 |  | 
|  | 2586 | if (!netif_running(dev) || !netif_device_present(dev)) { | 
|  | 2587 | /* We'll just catch it later when the | 
|  | 2588 | * device is brought up or resumed. | 
|  | 2589 | */ | 
|  | 2590 | dev->mtu = new_mtu; | 
|  | 2591 | return 0; | 
|  | 2592 | } | 
|  | 2593 |  | 
|  | 2594 | down(&gp->pm_sem); | 
|  | 2595 | spin_lock_irq(&gp->lock); | 
|  | 2596 | spin_lock(&gp->tx_lock); | 
|  | 2597 | dev->mtu = new_mtu; | 
|  | 2598 | if (gp->running) { | 
|  | 2599 | gem_reinit_chip(gp); | 
|  | 2600 | if (gp->lstate == link_up) | 
|  | 2601 | gem_set_link_modes(gp); | 
|  | 2602 | } | 
|  | 2603 | spin_unlock(&gp->tx_lock); | 
|  | 2604 | spin_unlock_irq(&gp->lock); | 
|  | 2605 | up(&gp->pm_sem); | 
|  | 2606 |  | 
|  | 2607 | return 0; | 
|  | 2608 | } | 
|  | 2609 |  | 
|  | 2610 | static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 
|  | 2611 | { | 
|  | 2612 | struct gem *gp = dev->priv; | 
|  | 2613 |  | 
|  | 2614 | strcpy(info->driver, DRV_NAME); | 
|  | 2615 | strcpy(info->version, DRV_VERSION); | 
|  | 2616 | strcpy(info->bus_info, pci_name(gp->pdev)); | 
|  | 2617 | } | 
|  | 2618 |  | 
|  | 2619 | static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
|  | 2620 | { | 
|  | 2621 | struct gem *gp = dev->priv; | 
|  | 2622 |  | 
|  | 2623 | if (gp->phy_type == phy_mii_mdio0 || | 
|  | 2624 | gp->phy_type == phy_mii_mdio1) { | 
|  | 2625 | if (gp->phy_mii.def) | 
|  | 2626 | cmd->supported = gp->phy_mii.def->features; | 
|  | 2627 | else | 
|  | 2628 | cmd->supported = (SUPPORTED_10baseT_Half | | 
|  | 2629 | SUPPORTED_10baseT_Full); | 
|  | 2630 |  | 
|  | 2631 | /* XXX hardcoded stuff for now */ | 
|  | 2632 | cmd->port = PORT_MII; | 
|  | 2633 | cmd->transceiver = XCVR_EXTERNAL; | 
|  | 2634 | cmd->phy_address = 0; /* XXX fixed PHYAD */ | 
|  | 2635 |  | 
|  | 2636 | /* Return current PHY settings */ | 
|  | 2637 | spin_lock_irq(&gp->lock); | 
|  | 2638 | cmd->autoneg = gp->want_autoneg; | 
|  | 2639 | cmd->speed = gp->phy_mii.speed; | 
|  | 2640 | cmd->duplex = gp->phy_mii.duplex; | 
|  | 2641 | cmd->advertising = gp->phy_mii.advertising; | 
|  | 2642 |  | 
|  | 2643 | /* If we started with a forced mode, we don't have a default | 
|  | 2644 | * advertise set, so we need to return something sensible to let | 
|  | 2645 | * userland re-enable autoneg properly. | 
|  | 2646 | */ | 
|  | 2647 | if (cmd->advertising == 0) | 
|  | 2648 | cmd->advertising = cmd->supported; | 
|  | 2649 | spin_unlock_irq(&gp->lock); | 
|  | 2650 | } else { // XXX PCS ? | 
|  | 2651 | cmd->supported = | 
|  | 2652 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | 
|  | 2653 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | 
|  | 2654 | SUPPORTED_Autoneg); | 
|  | 2655 | cmd->advertising = cmd->supported; | 
|  | 2656 | cmd->speed = 0; | 
|  | 2657 | cmd->duplex = cmd->port = cmd->phy_address = | 
|  | 2658 | cmd->transceiver = cmd->autoneg = 0; | 
|  | 2659 | } | 
|  | 2660 | cmd->maxtxpkt = cmd->maxrxpkt = 0; | 
|  | 2661 |  | 
|  | 2662 | return 0; | 
|  | 2663 | } | 
|  | 2664 |  | 
|  | 2665 | static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
|  | 2666 | { | 
|  | 2667 | struct gem *gp = dev->priv; | 
|  | 2668 |  | 
|  | 2669 | /* Verify the settings we care about. */ | 
|  | 2670 | if (cmd->autoneg != AUTONEG_ENABLE && | 
|  | 2671 | cmd->autoneg != AUTONEG_DISABLE) | 
|  | 2672 | return -EINVAL; | 
|  | 2673 |  | 
|  | 2674 | if (cmd->autoneg == AUTONEG_ENABLE && | 
|  | 2675 | cmd->advertising == 0) | 
|  | 2676 | return -EINVAL; | 
|  | 2677 |  | 
|  | 2678 | if (cmd->autoneg == AUTONEG_DISABLE && | 
|  | 2679 | ((cmd->speed != SPEED_1000 && | 
|  | 2680 | cmd->speed != SPEED_100 && | 
|  | 2681 | cmd->speed != SPEED_10) || | 
|  | 2682 | (cmd->duplex != DUPLEX_HALF && | 
|  | 2683 | cmd->duplex != DUPLEX_FULL))) | 
|  | 2684 | return -EINVAL; | 
|  | 2685 |  | 
|  | 2686 | /* Apply settings and restart link process. */ | 
|  | 2687 | spin_lock_irq(&gp->lock); | 
|  | 2688 | gem_get_cell(gp); | 
|  | 2689 | gem_begin_auto_negotiation(gp, cmd); | 
|  | 2690 | gem_put_cell(gp); | 
|  | 2691 | spin_unlock_irq(&gp->lock); | 
|  | 2692 |  | 
|  | 2693 | return 0; | 
|  | 2694 | } | 
|  | 2695 |  | 
|  | 2696 | static int gem_nway_reset(struct net_device *dev) | 
|  | 2697 | { | 
|  | 2698 | struct gem *gp = dev->priv; | 
|  | 2699 |  | 
|  | 2700 | if (!gp->want_autoneg) | 
|  | 2701 | return -EINVAL; | 
|  | 2702 |  | 
|  | 2703 | /* Restart link process. */ | 
|  | 2704 | spin_lock_irq(&gp->lock); | 
|  | 2705 | gem_get_cell(gp); | 
|  | 2706 | gem_begin_auto_negotiation(gp, NULL); | 
|  | 2707 | gem_put_cell(gp); | 
|  | 2708 | spin_unlock_irq(&gp->lock); | 
|  | 2709 |  | 
|  | 2710 | return 0; | 
|  | 2711 | } | 
|  | 2712 |  | 
|  | 2713 | static u32 gem_get_msglevel(struct net_device *dev) | 
|  | 2714 | { | 
|  | 2715 | struct gem *gp = dev->priv; | 
|  | 2716 | return gp->msg_enable; | 
|  | 2717 | } | 
|  | 2718 |  | 
|  | 2719 | static void gem_set_msglevel(struct net_device *dev, u32 value) | 
|  | 2720 | { | 
|  | 2721 | struct gem *gp = dev->priv; | 
|  | 2722 | gp->msg_enable = value; | 
|  | 2723 | } | 
|  | 2724 |  | 
|  | 2725 |  | 
|  | 2726 | /* Add more when I understand how to program the chip */ | 
|  | 2727 | /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ | 
|  | 2728 |  | 
|  | 2729 | #define WOL_SUPPORTED_MASK	(WAKE_MAGIC) | 
|  | 2730 |  | 
|  | 2731 | static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 
|  | 2732 | { | 
|  | 2733 | struct gem *gp = dev->priv; | 
|  | 2734 |  | 
|  | 2735 | /* Add more when I understand how to program the chip */ | 
|  | 2736 | if (gp->has_wol) { | 
|  | 2737 | wol->supported = WOL_SUPPORTED_MASK; | 
|  | 2738 | wol->wolopts = gp->wake_on_lan; | 
|  | 2739 | } else { | 
|  | 2740 | wol->supported = 0; | 
|  | 2741 | wol->wolopts = 0; | 
|  | 2742 | } | 
|  | 2743 | } | 
|  | 2744 |  | 
|  | 2745 | static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 
|  | 2746 | { | 
|  | 2747 | struct gem *gp = dev->priv; | 
|  | 2748 |  | 
|  | 2749 | if (!gp->has_wol) | 
|  | 2750 | return -EOPNOTSUPP; | 
|  | 2751 | gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; | 
|  | 2752 | return 0; | 
|  | 2753 | } | 
|  | 2754 |  | 
|  | 2755 | static struct ethtool_ops gem_ethtool_ops = { | 
|  | 2756 | .get_drvinfo		= gem_get_drvinfo, | 
|  | 2757 | .get_link		= ethtool_op_get_link, | 
|  | 2758 | .get_settings		= gem_get_settings, | 
|  | 2759 | .set_settings		= gem_set_settings, | 
|  | 2760 | .nway_reset		= gem_nway_reset, | 
|  | 2761 | .get_msglevel		= gem_get_msglevel, | 
|  | 2762 | .set_msglevel		= gem_set_msglevel, | 
|  | 2763 | .get_wol		= gem_get_wol, | 
|  | 2764 | .set_wol		= gem_set_wol, | 
|  | 2765 | }; | 
|  | 2766 |  | 
|  | 2767 | static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 
|  | 2768 | { | 
|  | 2769 | struct gem *gp = dev->priv; | 
|  | 2770 | struct mii_ioctl_data *data = if_mii(ifr); | 
|  | 2771 | int rc = -EOPNOTSUPP; | 
|  | 2772 | unsigned long flags; | 
|  | 2773 |  | 
|  | 2774 | /* Hold the PM semaphore while doing ioctls or we may collide | 
|  | 2775 | * with power management. | 
|  | 2776 | */ | 
|  | 2777 | down(&gp->pm_sem); | 
|  | 2778 |  | 
|  | 2779 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2780 | gem_get_cell(gp); | 
|  | 2781 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2782 |  | 
|  | 2783 | switch (cmd) { | 
|  | 2784 | case SIOCGMIIPHY:		/* Get address of MII PHY in use. */ | 
|  | 2785 | data->phy_id = gp->mii_phy_addr; | 
|  | 2786 | /* Fallthrough... */ | 
|  | 2787 |  | 
|  | 2788 | case SIOCGMIIREG:		/* Read MII PHY register. */ | 
|  | 2789 | if (!gp->running) | 
|  | 2790 | rc = -EAGAIN; | 
|  | 2791 | else { | 
|  | 2792 | data->val_out = __phy_read(gp, data->phy_id & 0x1f, | 
|  | 2793 | data->reg_num & 0x1f); | 
|  | 2794 | rc = 0; | 
|  | 2795 | } | 
|  | 2796 | break; | 
|  | 2797 |  | 
|  | 2798 | case SIOCSMIIREG:		/* Write MII PHY register. */ | 
|  | 2799 | if (!capable(CAP_NET_ADMIN)) | 
|  | 2800 | rc = -EPERM; | 
|  | 2801 | else if (!gp->running) | 
|  | 2802 | rc = -EAGAIN; | 
|  | 2803 | else { | 
|  | 2804 | __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, | 
|  | 2805 | data->val_in); | 
|  | 2806 | rc = 0; | 
|  | 2807 | } | 
|  | 2808 | break; | 
|  | 2809 | }; | 
|  | 2810 |  | 
|  | 2811 | spin_lock_irqsave(&gp->lock, flags); | 
|  | 2812 | gem_put_cell(gp); | 
|  | 2813 | spin_unlock_irqrestore(&gp->lock, flags); | 
|  | 2814 |  | 
|  | 2815 | up(&gp->pm_sem); | 
|  | 2816 |  | 
|  | 2817 | return rc; | 
|  | 2818 | } | 
|  | 2819 |  | 
|  | 2820 | #if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC)) | 
|  | 2821 | /* Fetch MAC address from vital product data of PCI ROM. */ | 
| Linus Torvalds | 4120b02 | 2005-09-11 09:26:20 -0700 | [diff] [blame] | 2822 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | { | 
|  | 2824 | int this_offset; | 
|  | 2825 |  | 
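|  |  | /* Scan the ROM image for what looks like a VPD-style record whose | 
|  |  | * "NA" (network address) keyword carries six data bytes; those six | 
|  |  | * bytes are taken as the MAC address. | 
|  |  | */ | 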
|  | 2826 | for (this_offset = 0x20; this_offset < len; this_offset++) { | 
|  | 2827 | void __iomem *p = rom_base + this_offset; | 
|  | 2828 | int i; | 
|  | 2829 |  | 
|  | 2830 | if (readb(p + 0) != 0x90 || | 
|  | 2831 | readb(p + 1) != 0x00 || | 
|  | 2832 | readb(p + 2) != 0x09 || | 
|  | 2833 | readb(p + 3) != 0x4e || | 
|  | 2834 | readb(p + 4) != 0x41 || | 
|  | 2835 | readb(p + 5) != 0x06) | 
|  | 2836 | continue; | 
|  | 2837 |  | 
|  | 2838 | this_offset += 6; | 
|  | 2839 | p += 6; | 
|  | 2840 |  | 
|  | 2841 | for (i = 0; i < 6; i++) | 
|  | 2842 | dev_addr[i] = readb(p + i); | 
| Linus Torvalds | 4120b02 | 2005-09-11 09:26:20 -0700 | [diff] [blame] | 2843 | return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2844 | } | 
| Linus Torvalds | 4120b02 | 2005-09-11 09:26:20 -0700 | [diff] [blame] | 2845 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2846 | } | 
|  | 2847 |  | 
|  | 2848 | static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) | 
|  | 2849 | { | 
| Linus Torvalds | 4120b02 | 2005-09-11 09:26:20 -0700 | [diff] [blame] | 2850 | size_t size; | 
|  | 2851 | void __iomem *p = pci_map_rom(pdev, &size); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2852 |  | 
| Linus Torvalds | 4120b02 | 2005-09-11 09:26:20 -0700 | [diff] [blame] | 2853 | if (p) { | 
|  | 2854 | int found; | 
|  | 2855 |  | 
|  | 2856 | found = readb(p) == 0x55 && | 
|  | 2857 | readb(p + 1) == 0xaa && | 
|  | 2858 | find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); | 
|  | 2859 | pci_unmap_rom(pdev, p); | 
|  | 2860 | if (found) | 
|  | 2861 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2862 | } | 
|  | 2863 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2864 | /* Sun MAC prefix then 3 random bytes. */ | 
|  | 2865 | dev_addr[0] = 0x08; | 
|  | 2866 | dev_addr[1] = 0x00; | 
|  | 2867 | dev_addr[2] = 0x20; | 
|  | 2868 | get_random_bytes(dev_addr + 3, 3); | 
|  | 2869 | return; | 
|  | 2870 | } | 
|  | 2871 | #endif /* not Sparc and not PPC */ | 
|  | 2872 |  | 
|  | 2873 | static int __devinit gem_get_device_address(struct gem *gp) | 
|  | 2874 | { | 
|  | 2875 | #if defined(__sparc__) || defined(CONFIG_PPC_PMAC) | 
|  | 2876 | struct net_device *dev = gp->dev; | 
|  | 2877 | #endif | 
|  | 2878 |  | 
|  | 2879 | #if defined(__sparc__) | 
|  | 2880 | struct pci_dev *pdev = gp->pdev; | 
|  | 2881 | struct pcidev_cookie *pcp = pdev->sysdata; | 
|  | 2882 | int node = -1; | 
|  | 2883 |  | 
|  | 2884 | if (pcp != NULL) { | 
|  | 2885 | node = pcp->prom_node; | 
|  | 2886 | if (prom_getproplen(node, "local-mac-address") == 6) | 
|  | 2887 | prom_getproperty(node, "local-mac-address", | 
|  | 2888 | dev->dev_addr, 6); | 
|  | 2889 | else | 
|  | 2890 | node = -1; | 
|  | 2891 | } | 
|  | 2892 | if (node == -1) | 
|  | 2893 | memcpy(dev->dev_addr, idprom->id_ethaddr, 6); | 
|  | 2894 | #elif defined(CONFIG_PPC_PMAC) | 
|  | 2895 | unsigned char *addr; | 
|  | 2896 |  | 
|  | 2897 | addr = get_property(gp->of_node, "local-mac-address", NULL); | 
|  | 2898 | if (addr == NULL) { | 
|  | 2899 | printk("\n"); | 
|  | 2900 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); | 
|  | 2901 | return -1; | 
|  | 2902 | } | 
|  | 2903 | memcpy(dev->dev_addr, addr, 6); | 
|  | 2904 | #else | 
|  | 2905 | get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); | 
|  | 2906 | #endif | 
|  | 2907 | return 0; | 
|  | 2908 | } | 
|  | 2909 |  | 
| Adrian Bunk | 1490439 | 2005-12-21 18:50:12 -0800 | [diff] [blame] | 2910 | static void gem_remove_one(struct pci_dev *pdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2911 | { | 
|  | 2912 | struct net_device *dev = pci_get_drvdata(pdev); | 
|  | 2913 |  | 
|  | 2914 | if (dev) { | 
|  | 2915 | struct gem *gp = dev->priv; | 
|  | 2916 |  | 
|  | 2917 | unregister_netdev(dev); | 
|  | 2918 |  | 
|  | 2919 | /* Stop the link timer */ | 
|  | 2920 | del_timer_sync(&gp->link_timer); | 
|  | 2921 |  | 
|  | 2922 | /* We shouldn't need any locking here */ | 
|  | 2923 | gem_get_cell(gp); | 
|  | 2924 |  | 
|  | 2925 | /* Wait for a pending reset task to complete */ | 
|  | 2926 | while (gp->reset_task_pending) | 
|  | 2927 | yield(); | 
|  | 2928 | flush_scheduled_work(); | 
|  | 2929 |  | 
|  | 2930 | /* Shut the PHY down */ | 
|  | 2931 | gem_stop_phy(gp, 0); | 
|  | 2932 |  | 
|  | 2933 | gem_put_cell(gp); | 
|  | 2934 |  | 
|  | 2935 | /* Make sure bus master is disabled */ | 
|  | 2936 | pci_disable_device(gp->pdev); | 
|  | 2937 |  | 
|  | 2938 | /* Free resources */ | 
|  | 2939 | pci_free_consistent(pdev, | 
|  | 2940 | sizeof(struct gem_init_block), | 
|  | 2941 | gp->init_block, | 
|  | 2942 | gp->gblock_dvma); | 
|  | 2943 | iounmap(gp->regs); | 
|  | 2944 | pci_release_regions(pdev); | 
|  | 2945 | free_netdev(dev); | 
|  | 2946 |  | 
|  | 2947 | pci_set_drvdata(pdev, NULL); | 
|  | 2948 | } | 
|  | 2949 | } | 
|  | 2950 |  | 
|  | 2951 | static int __devinit gem_init_one(struct pci_dev *pdev, | 
|  | 2952 | const struct pci_device_id *ent) | 
|  | 2953 | { | 
|  | 2954 | static int gem_version_printed = 0; | 
|  | 2955 | unsigned long gemreg_base, gemreg_len; | 
|  | 2956 | struct net_device *dev; | 
|  | 2957 | struct gem *gp; | 
|  | 2958 | int i, err, pci_using_dac; | 
|  | 2959 |  | 
|  | 2960 | if (gem_version_printed++ == 0) | 
|  | 2961 | printk(KERN_INFO "%s", version); | 
|  | 2962 |  | 
|  | 2963 | /* Apple gmac note: during probe, the chip is powered up by | 
|  | 2964 | * the arch code to allow the code below to work (and to let | 
|  | 2965 | * the chip be probed on the config space). It won't stay powered | 
|  | 2966 | * up until the interface is brought up, however, so we can't rely | 
|  | 2967 | * on register configuration done at this point. | 
|  | 2968 | */ | 
|  | 2969 | err = pci_enable_device(pdev); | 
|  | 2970 | if (err) { | 
|  | 2971 | printk(KERN_ERR PFX "Cannot enable MMIO operation, " | 
|  | 2972 | "aborting.\n"); | 
|  | 2973 | return err; | 
|  | 2974 | } | 
|  | 2975 | pci_set_master(pdev); | 
|  | 2976 |  | 
|  | 2977 | /* Configure DMA attributes. */ | 
|  | 2978 |  | 
|  | 2979 | /* All of the GEM documentation states that 64-bit DMA addressing | 
|  | 2980 | * is fully supported and should work just fine.  However, the | 
|  | 2981 | * front end for RIO-based GEMs is different and only supports | 
|  | 2982 | * 32-bit addressing. | 
|  | 2983 | * | 
|  | 2984 | * For now we assume the various PPC GEMs are 32-bit only as well. | 
|  | 2985 | */ | 
|  | 2986 | if (pdev->vendor == PCI_VENDOR_ID_SUN && | 
|  | 2987 | pdev->device == PCI_DEVICE_ID_SUN_GEM && | 
| Domen Puncer | 1e7f0bd | 2005-06-26 18:22:14 -0400 | [diff] [blame] | 2988 | !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2989 | pci_using_dac = 1; | 
|  | 2990 | } else { | 
| Domen Puncer | 1e7f0bd | 2005-06-26 18:22:14 -0400 | [diff] [blame] | 2991 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2992 | if (err) { | 
|  | 2993 | printk(KERN_ERR PFX "No usable DMA configuration, " | 
|  | 2994 | "aborting.\n"); | 
|  | 2995 | goto err_disable_device; | 
|  | 2996 | } | 
|  | 2997 | pci_using_dac = 0; | 
|  | 2998 | } | 
|  | 2999 |  | 
|  | 3000 | gemreg_base = pci_resource_start(pdev, 0); | 
|  | 3001 | gemreg_len = pci_resource_len(pdev, 0); | 
|  | 3002 |  | 
|  | 3003 | if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { | 
|  | 3004 | printk(KERN_ERR PFX "Cannot find proper PCI device " | 
|  | 3005 | "base address, aborting.\n"); | 
|  | 3006 | err = -ENODEV; | 
|  | 3007 | goto err_disable_device; | 
|  | 3008 | } | 
|  | 3009 |  | 
|  | 3010 | dev = alloc_etherdev(sizeof(*gp)); | 
|  | 3011 | if (!dev) { | 
|  | 3012 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | 
|  | 3013 | err = -ENOMEM; | 
|  | 3014 | goto err_disable_device; | 
|  | 3015 | } | 
|  | 3016 | SET_MODULE_OWNER(dev); | 
|  | 3017 | SET_NETDEV_DEV(dev, &pdev->dev); | 
|  | 3018 |  | 
|  | 3019 | gp = dev->priv; | 
|  | 3020 |  | 
|  | 3021 | err = pci_request_regions(pdev, DRV_NAME); | 
|  | 3022 | if (err) { | 
|  | 3023 | printk(KERN_ERR PFX "Cannot obtain PCI resources, " | 
|  | 3024 | "aborting.\n"); | 
|  | 3025 | goto err_out_free_netdev; | 
|  | 3026 | } | 
|  | 3027 |  | 
|  | 3028 | gp->pdev = pdev; | 
|  | 3029 | dev->base_addr = (long) pdev; | 
|  | 3030 | gp->dev = dev; | 
|  | 3031 |  | 
|  | 3032 | gp->msg_enable = DEFAULT_MSG; | 
|  | 3033 |  | 
|  | 3034 | spin_lock_init(&gp->lock); | 
|  | 3035 | spin_lock_init(&gp->tx_lock); | 
|  | 3036 | init_MUTEX(&gp->pm_sem); | 
|  | 3037 |  | 
|  | 3038 | init_timer(&gp->link_timer); | 
|  | 3039 | gp->link_timer.function = gem_link_timer; | 
|  | 3040 | gp->link_timer.data = (unsigned long) gp; | 
|  | 3041 |  | 
|  | 3042 | INIT_WORK(&gp->reset_task, gem_reset_task, gp); | 
|  | 3043 |  | 
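|  |  | /* Assume the link is down until autoneg and the link timer say otherwise */ | 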
|  | 3044 | gp->lstate = link_down; | 
|  | 3045 | gp->timer_ticks = 0; | 
|  | 3046 | netif_carrier_off(dev); | 
|  | 3047 |  | 
|  | 3048 | gp->regs = ioremap(gemreg_base, gemreg_len); | 
|  | 3049 | if (gp->regs == NULL) { | 
|  | 3050 | printk(KERN_ERR PFX "Cannot map device registers, " | 
|  | 3051 | "aborting.\n"); | 
|  | 3052 | err = -EIO; | 
|  | 3053 | goto err_out_free_res; | 
|  | 3054 | } | 
|  | 3055 |  | 
|  | 3056 | /* On Apple, we want a reference to the Open Firmware device-tree | 
|  | 3057 | * node. We use it for clock control. | 
|  | 3058 | */ | 
|  | 3059 | #ifdef CONFIG_PPC_PMAC | 
|  | 3060 | gp->of_node = pci_device_to_OF_node(pdev); | 
|  | 3061 | #endif | 
|  | 3062 |  | 
|  | 3063 | /* Only the Apple version supports WOL, as far as we know */ | 
|  | 3064 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) | 
|  | 3065 | gp->has_wol = 1; | 
|  | 3066 |  | 
|  | 3067 | /* Make sure cell is enabled */ | 
|  | 3068 | gem_get_cell(gp); | 
|  | 3069 |  | 
|  | 3070 | /* Make sure everything is stopped and in init state */ | 
|  | 3071 | gem_reset(gp); | 
|  | 3072 |  | 
|  | 3073 | /* Fill up the mii_phy structure (even if we won't use it) */ | 
|  | 3074 | gp->phy_mii.dev = dev; | 
|  | 3075 | gp->phy_mii.mdio_read = _phy_read; | 
|  | 3076 | gp->phy_mii.mdio_write = _phy_write; | 
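|  |  | /* Hook the shared sungem PHY layer up to our MDIO register accessors */ | 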
| Benjamin Herrenschmidt | 3c326fe | 2005-07-07 17:56:09 -0700 | [diff] [blame] | 3077 | #ifdef CONFIG_PPC_PMAC | 
|  | 3078 | gp->phy_mii.platform_data = gp->of_node; | 
|  | 3079 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3080 | /* By default, we start with autoneg */ | 
|  | 3081 | gp->want_autoneg = 1; | 
|  | 3082 |  | 
|  | 3083 | /* Check fifo sizes, PHY type, etc... */ | 
|  | 3084 | if (gem_check_invariants(gp)) { | 
|  | 3085 | err = -ENODEV; | 
|  | 3086 | goto err_out_iounmap; | 
|  | 3087 | } | 
|  | 3088 |  | 
|  | 3089 | /* It is guaranteed that the returned buffer will be at least | 
|  | 3090 | * PAGE_SIZE aligned. | 
|  | 3091 | */ | 
|  | 3092 | gp->init_block = (struct gem_init_block *) | 
|  | 3093 | pci_alloc_consistent(pdev, sizeof(struct gem_init_block), | 
|  | 3094 | &gp->gblock_dvma); | 
|  | 3095 | if (!gp->init_block) { | 
|  | 3096 | printk(KERN_ERR PFX "Cannot allocate init block, " | 
|  | 3097 | "aborting.\n"); | 
|  | 3098 | err = -ENOMEM; | 
|  | 3099 | goto err_out_iounmap; | 
|  | 3100 | } | 
|  | 3101 |  | 
|  | 3102 | if (gem_get_device_address(gp)) | 
|  | 3103 | goto err_out_free_consistent; | 
|  | 3104 |  | 
|  | 3105 | dev->open = gem_open; | 
|  | 3106 | dev->stop = gem_close; | 
|  | 3107 | dev->hard_start_xmit = gem_start_xmit; | 
|  | 3108 | dev->get_stats = gem_get_stats; | 
|  | 3109 | dev->set_multicast_list = gem_set_multicast; | 
|  | 3110 | dev->do_ioctl = gem_ioctl; | 
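|  |  | /* NAPI: gem_poll() handles up to dev->weight (64) packets per poll */ | 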
|  | 3111 | dev->poll = gem_poll; | 
|  | 3112 | dev->weight = 64; | 
|  | 3113 | dev->ethtool_ops = &gem_ethtool_ops; | 
|  | 3114 | dev->tx_timeout = gem_tx_timeout; | 
|  | 3115 | dev->watchdog_timeo = 5 * HZ; | 
|  | 3116 | dev->change_mtu = gem_change_mtu; | 
|  | 3117 | dev->irq = pdev->irq; | 
|  | 3118 | dev->dma = 0; | 
|  | 3119 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
|  | 3120 | dev->poll_controller = gem_poll_controller; | 
|  | 3121 | #endif | 
|  | 3122 |  | 
|  | 3123 | /* Set the driver data now, in case PM kicks in before probe completes */ | 
|  | 3124 | pci_set_drvdata(pdev, dev); | 
|  | 3125 |  | 
|  | 3126 | /* Detect & init the PHY and start autoneg. We also release the cell | 
|  | 3127 | * now; it will be managed by whoever needs it. | 
|  | 3128 | */ | 
|  | 3129 | gem_init_phy(gp); | 
|  | 3130 |  | 
|  | 3131 | spin_lock_irq(&gp->lock); | 
|  | 3132 | gem_put_cell(gp); | 
|  | 3133 | spin_unlock_irq(&gp->lock); | 
|  | 3134 |  | 
|  | 3135 | /* Register with kernel */ | 
|  | 3136 | if (register_netdev(dev)) { | 
|  | 3137 | printk(KERN_ERR PFX "Cannot register net device, " | 
|  | 3138 | "aborting.\n"); | 
|  | 3139 | err = -ENOMEM; | 
|  | 3140 | goto err_out_free_consistent; | 
|  | 3141 | } | 
|  | 3142 |  | 
|  | 3143 | printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", | 
|  | 3144 | dev->name); | 
|  | 3145 | for (i = 0; i < 6; i++) | 
|  | 3146 | printk("%2.2x%c", dev->dev_addr[i], | 
|  | 3147 | i == 5 ? ' ' : ':'); | 
|  | 3148 | printk("\n"); | 
|  | 3149 |  | 
|  | 3150 | if (gp->phy_type == phy_mii_mdio0 || | 
|  | 3151 | gp->phy_type == phy_mii_mdio1) | 
|  | 3152 | printk(KERN_INFO "%s: Found %s PHY\n", dev->name, | 
|  | 3153 | gp->phy_mii.def ? gp->phy_mii.def->name : "no"); | 
|  | 3154 |  | 
|  | 3155 | /* GEM can do it all: scatter-gather and hardware checksumming; we also use lockless TX (LLTX). */ | 
|  | 3156 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; | 
|  | 3157 | if (pci_using_dac) | 
|  | 3158 | dev->features |= NETIF_F_HIGHDMA; | 
|  | 3159 |  | 
|  | 3160 | return 0; | 
|  | 3161 |  | 
|  | 3162 | err_out_free_consistent: | 
|  | 3163 | /* The netdev was never registered here; just free our DMA block */ | 
|  |  | pci_free_consistent(pdev, sizeof(struct gem_init_block), | 
|  |  | gp->init_block, gp->gblock_dvma); | 
|  | 3164 | err_out_iounmap: | 
|  | 3165 | gem_put_cell(gp); | 
|  | 3166 | iounmap(gp->regs); | 
|  | 3167 |  | 
|  | 3168 | err_out_free_res: | 
|  | 3169 | pci_release_regions(pdev); | 
|  | 3170 |  | 
|  | 3171 | err_out_free_netdev: | 
|  | 3172 | free_netdev(dev); | 
|  | 3173 | err_disable_device: | 
|  | 3174 | pci_disable_device(pdev); | 
|  | 3175 | return err; | 
|  | 3176 |  | 
|  | 3177 | } | 
|  | 3178 |  | 
|  | 3179 |  | 
|  | 3180 | static struct pci_driver gem_driver = { | 
|  | 3181 | .name		= GEM_MODULE_NAME, | 
|  | 3182 | .id_table	= gem_pci_tbl, | 
|  | 3183 | .probe		= gem_init_one, | 
| Adrian Bunk | 1490439 | 2005-12-21 18:50:12 -0800 | [diff] [blame] | 3184 | .remove		= gem_remove_one, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3185 | #ifdef CONFIG_PM | 
|  | 3186 | .suspend	= gem_suspend, | 
|  | 3187 | .resume		= gem_resume, | 
|  | 3188 | #endif /* CONFIG_PM */ | 
|  | 3189 | }; | 
|  | 3190 |  | 
|  | 3191 | static int __init gem_init(void) | 
|  | 3192 | { | 
|  | 3193 | return pci_module_init(&gem_driver); | 
|  | 3194 | } | 
|  | 3195 |  | 
|  | 3196 | static void __exit gem_cleanup(void) | 
|  | 3197 | { | 
|  | 3198 | pci_unregister_driver(&gem_driver); | 
|  | 3199 | } | 
|  | 3200 |  | 
|  | 3201 | module_init(gem_init); | 
|  | 3202 | module_exit(gem_cleanup); |