/*
 * Atmel MACB Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>

#include <asm/arch/board.h>

#include "macb.h"

#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512
#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)

/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
#define RX_OFFSET		2

#define TX_RING_SIZE		128
#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)
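
/*
 * The TX ring is used as a circular buffer with at most tx_pending
 * descriptors in flight: tx_head is where the next packet is queued,
 * tx_tail is the oldest descriptor not yet reclaimed.  TX_BUFFS_AVAIL()
 * accounts for wrap-around of the two indices.
 */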
#define TX_RING_GAP(bp)						\
        (TX_RING_SIZE - (bp)->tx_pending)
#define TX_BUFFS_AVAIL(bp)					\
        (((bp)->tx_tail <= (bp)->tx_head) ?			\
         (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
         (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))

#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))

/* minimum number of free TX descriptors before waking up TX process */
#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
                                 | MACB_BIT(ISR_ROVR))

static void __macb_set_hwaddr(struct macb *bp)
{
        u32 bottom;
        u16 top;

        bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
        macb_writel(bp, SA1B, bottom);
        top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
        macb_writel(bp, SA1T, top);
}

static void __init macb_get_hwaddr(struct macb *bp)
{
        u32 bottom;
        u16 top;
        u8 addr[6];

        bottom = macb_readl(bp, SA1B);
        top = macb_readl(bp, SA1T);

        addr[0] = bottom & 0xff;
        addr[1] = (bottom >> 8) & 0xff;
        addr[2] = (bottom >> 16) & 0xff;
        addr[3] = (bottom >> 24) & 0xff;
        addr[4] = top & 0xff;
        addr[5] = (top >> 8) & 0xff;

        if (is_valid_ether_addr(addr))
                memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}

static void macb_enable_mdio(struct macb *bp)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&bp->lock, flags);
        reg = macb_readl(bp, NCR);
        reg |= MACB_BIT(MPE);
        macb_writel(bp, NCR, reg);
        macb_writel(bp, IER, MACB_BIT(MFD));
        spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_disable_mdio(struct macb *bp)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&bp->lock, flags);
        reg = macb_readl(bp, NCR);
        reg &= ~MACB_BIT(MPE);
        macb_writel(bp, NCR, reg);
        macb_writel(bp, IDR, MACB_BIT(MFD));
        spin_unlock_irqrestore(&bp->lock, flags);
}
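
/*
 * MDIO accesses are serialized by mdio_mutex.  A management frame is
 * written to the MAN register, and the MFD interrupt completes
 * mdio_complete before the result (if any) is read back.
 */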
static int macb_mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct macb *bp = netdev_priv(dev);
        int value;

        mutex_lock(&bp->mdio_mutex);

        macb_enable_mdio(bp);
        macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                              | MACB_BF(RW, MACB_MAN_READ)
                              | MACB_BF(PHYA, phy_id)
                              | MACB_BF(REGA, location)
                              | MACB_BF(CODE, MACB_MAN_CODE)));

        wait_for_completion(&bp->mdio_complete);

        value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
        macb_disable_mdio(bp);
        mutex_unlock(&bp->mdio_mutex);

        return value;
}

static void macb_mdio_write(struct net_device *dev, int phy_id,
                            int location, int val)
{
        struct macb *bp = netdev_priv(dev);

        dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n",
                phy_id, location, val);

        mutex_lock(&bp->mdio_mutex);
        macb_enable_mdio(bp);

        macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                              | MACB_BF(RW, MACB_MAN_WRITE)
                              | MACB_BF(PHYA, phy_id)
                              | MACB_BF(REGA, location)
                              | MACB_BF(CODE, MACB_MAN_CODE)
                              | MACB_BF(DATA, val)));

        wait_for_completion(&bp->mdio_complete);

        macb_disable_mdio(bp);
        mutex_unlock(&bp->mdio_mutex);
}
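
/*
 * Scan all 32 MDIO addresses for a PHY whose ID registers read as
 * something other than all-zeros or all-ones.
 */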
static int macb_phy_probe(struct macb *bp)
{
        int phy_address;
        u16 phyid1, phyid2;

        for (phy_address = 0; phy_address < 32; phy_address++) {
                phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1);
                phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2);

                if (phyid1 != 0xffff && phyid1 != 0x0000
                    && phyid2 != 0xffff && phyid2 != 0x0000)
                        break;
        }

        if (phy_address == 32)
                return -ENODEV;

        dev_info(&bp->pdev->dev,
                 "detected PHY at address %d (ID %04x:%04x)\n",
                 phy_address, phyid1, phyid2);

        bp->mii.phy_id = phy_address;
        return 0;
}

static void macb_set_media(struct macb *bp, int media)
{
        u32 reg;

        spin_lock_irq(&bp->lock);
        reg = macb_readl(bp, NCFGR);
        reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
        if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL))
                reg |= MACB_BIT(SPD);
        if (media & ADVERTISE_FULL)
                reg |= MACB_BIT(FD);
        macb_writel(bp, NCFGR, reg);
        spin_unlock_irq(&bp->lock);
}

static void macb_check_media(struct macb *bp, int ok_to_print, int init_media)
{
        struct mii_if_info *mii = &bp->mii;
        unsigned int old_carrier, new_carrier;
        int advertise, lpa, media, duplex;

        /* if forced media, go no further */
        if (mii->force_media)
                return;

        /* check current and old link status */
        old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
        new_carrier = (unsigned int) mii_link_ok(mii);

        /* if carrier state did not change, assume nothing else did */
        if (!init_media && old_carrier == new_carrier)
                return;

        /* no carrier, nothing much to do */
        if (!new_carrier) {
                netif_carrier_off(mii->dev);
                printk(KERN_INFO "%s: link down\n", mii->dev->name);
                return;
        }

        /*
         * we have carrier, see who's on the other end
         */
        netif_carrier_on(mii->dev);

        /* get MII advertise and LPA values */
        if (!init_media && mii->advertising) {
                advertise = mii->advertising;
        } else {
                advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
                mii->advertising = advertise;
        }
        lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);

        /* figure out media and duplex from advertise and LPA values */
        media = mii_nway_result(lpa & advertise);
        duplex = (media & ADVERTISE_FULL) ? 1 : 0;

        if (ok_to_print)
                printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
                       mii->dev->name,
                       media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
                       duplex ? "full" : "half", lpa);

        mii->full_duplex = duplex;

        /* Let the MAC know about the new link state */
        macb_set_media(bp, media);
}
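
/*
 * The hardware statistics registers form one contiguous block from PFR
 * to TPF, mirrored field-for-field by struct macb_stats, so they can
 * be accumulated with a single loop over that range.
 */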
static void macb_update_stats(struct macb *bp)
{
        u32 __iomem *reg = bp->regs + MACB_PFR;
        u32 *p = &bp->hw_stats.rx_pause_frames;
        u32 *end = &bp->hw_stats.tx_pause_frames + 1;

        WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

        for (; p < end; p++, reg++)
                *p += __raw_readl(reg);
}

static void macb_periodic_task(struct work_struct *work)
{
        struct macb *bp = container_of(work, struct macb, periodic_task.work);

        macb_update_stats(bp);
        macb_check_media(bp, 1, 0);

        schedule_delayed_work(&bp->periodic_task, HZ);
}
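
/*
 * Reclaim completed TX descriptors: walk the ring from tx_tail towards
 * tx_head, unmap and free each skb whose TX_USED bit the hardware has
 * set, and wake the queue once enough descriptors are free again.
 */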
static void macb_tx(struct macb *bp)
{
        unsigned int tail;
        unsigned int head;
        u32 status;

        status = macb_readl(bp, TSR);
        macb_writel(bp, TSR, status);

        dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
                (unsigned long)status);

        if (status & MACB_BIT(UND)) {
                printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
                       bp->dev->name);
                bp->tx_head = bp->tx_tail = 0;
        }

        if (!(status & MACB_BIT(COMP)))
                /*
                 * This may happen when a buffer becomes complete
                 * between reading the ISR and scanning the
                 * descriptors.  Nothing to worry about.
                 */
                return;

        head = bp->tx_head;
        for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
                struct ring_info *rp = &bp->tx_skb[tail];
                struct sk_buff *skb = rp->skb;
                u32 bufstat;

                BUG_ON(skb == NULL);

                rmb();
                bufstat = bp->tx_ring[tail].ctrl;

                if (!(bufstat & MACB_BIT(TX_USED)))
                        break;

                dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
                        tail, skb->data);
                dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
                                 DMA_TO_DEVICE);
                bp->stats.tx_packets++;
                bp->stats.tx_bytes += skb->len;
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_tail = tail;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);
}
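
/*
 * Copy one received frame, which may span several RX_BUFFER_SIZE
 * buffers, into a freshly allocated skb and hand it to the stack.
 * Returns nonzero if the frame had to be dropped for lack of memory.
 */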
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                         unsigned int last_frag)
{
        unsigned int len;
        unsigned int frag;
        unsigned int offset = 0;
        struct sk_buff *skb;

        len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);

        dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
                first_frag, last_frag, len);

        skb = dev_alloc_skb(len + RX_OFFSET);
        if (!skb) {
                bp->stats.rx_dropped++;
                for (frag = first_frag; ; frag = NEXT_RX(frag)) {
                        bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
                        if (frag == last_frag)
                                break;
                }
                wmb();
                return 1;
        }

        skb_reserve(skb, RX_OFFSET);
        skb->dev = bp->dev;
        skb->ip_summed = CHECKSUM_NONE;
        skb_put(skb, len);

        for (frag = first_frag; ; frag = NEXT_RX(frag)) {
                unsigned int frag_len = RX_BUFFER_SIZE;

                if (offset + frag_len > len) {
                        BUG_ON(frag != last_frag);
                        frag_len = len - offset;
                }
                memcpy(skb->data + offset,
                       bp->rx_buffers + (RX_BUFFER_SIZE * frag),
                       frag_len);
                offset += RX_BUFFER_SIZE;
                bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
                wmb();

                if (frag == last_frag)
                        break;
        }

        skb->protocol = eth_type_trans(skb, bp->dev);

        bp->stats.rx_packets++;
        bp->stats.rx_bytes += len;
        bp->dev->last_rx = jiffies;
        dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
                skb->len, skb->csum);
        netif_receive_skb(skb);

        return 0;
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
                                  unsigned int end)
{
        unsigned int frag;

        for (frag = begin; frag != end; frag = NEXT_RX(frag))
                bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
        wmb();

        /*
         * When this happens, the hardware stats registers for
         * whatever caused this is updated, so we don't have to record
         * anything.
         */
}
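
/*
 * Scan the RX ring for descriptors the hardware has marked as used,
 * reassembling each SOF..EOF sequence into a complete frame, until the
 * budget is exhausted or no more used descriptors remain.
 */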
static int macb_rx(struct macb *bp, int budget)
{
        int received = 0;
        unsigned int tail = bp->rx_tail;
        int first_frag = -1;

        for (; budget > 0; tail = NEXT_RX(tail)) {
                u32 addr, ctrl;

                rmb();
                addr = bp->rx_ring[tail].addr;
                ctrl = bp->rx_ring[tail].ctrl;

                if (!(addr & MACB_BIT(RX_USED)))
                        break;

                if (ctrl & MACB_BIT(RX_SOF)) {
                        if (first_frag != -1)
                                discard_partial_frame(bp, first_frag, tail);
                        first_frag = tail;
                }

                if (ctrl & MACB_BIT(RX_EOF)) {
                        int dropped;
                        BUG_ON(first_frag == -1);

                        dropped = macb_rx_frame(bp, first_frag, tail);
                        first_frag = -1;
                        if (!dropped) {
                                received++;
                                budget--;
                        }
                }
        }

        if (first_frag != -1)
                bp->rx_tail = first_frag;
        else
                bp->rx_tail = tail;

        return received;
}
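
/*
 * NAPI poll callback (old dev->poll interface): process received
 * frames within the given budget, complete the poll when the ring runs
 * dry and re-enable the RX interrupts masked in macb_interrupt().
 */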
static int macb_poll(struct net_device *dev, int *budget)
{
        struct macb *bp = netdev_priv(dev);
        int orig_budget, work_done, retval = 0;
        u32 status;

        status = macb_readl(bp, RSR);
        macb_writel(bp, RSR, status);

        if (!status) {
                /*
                 * This may happen if an interrupt was pending before
                 * this function was called last time, and no packets
                 * have been received since.
                 */
                netif_rx_complete(dev);
                goto out;
        }

        dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
                (unsigned long)status, *budget);

        if (!(status & MACB_BIT(REC))) {
                dev_warn(&bp->pdev->dev,
                         "No RX buffers complete, status = %02lx\n",
                         (unsigned long)status);
                netif_rx_complete(dev);
                goto out;
        }

        orig_budget = *budget;
        if (orig_budget > dev->quota)
                orig_budget = dev->quota;

        work_done = macb_rx(bp, orig_budget);
        if (work_done < orig_budget) {
                netif_rx_complete(dev);
                retval = 0;
        } else {
                retval = 1;
        }

        /*
         * We've done what we can to clean the buffers. Make sure we
         * get notified when new packets arrive.
         */
out:
        macb_writel(bp, IER, MACB_RX_INT_FLAGS);

        /* TODO: Handle errors */

        return retval;
}
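
/*
 * Interrupt handler: completes MDIO transfers, schedules the RX poll
 * (with RX interrupts masked until the poll has run), reclaims TX
 * descriptors and reports DMA bus errors.
 */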
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct macb *bp = netdev_priv(dev);
        u32 status;

        status = macb_readl(bp, ISR);

        if (unlikely(!status))
                return IRQ_NONE;

        spin_lock(&bp->lock);

        while (status) {
                if (status & MACB_BIT(MFD))
                        complete(&bp->mdio_complete);

                /* close possible race with dev_close */
                if (unlikely(!netif_running(dev))) {
                        macb_writel(bp, IDR, ~0UL);
                        break;
                }

                if (status & MACB_RX_INT_FLAGS) {
                        if (netif_rx_schedule_prep(dev)) {
                                /*
                                 * There's no point taking any more interrupts
                                 * until we have processed the buffers
                                 */
                                macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
                                dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n");
                                __netif_rx_schedule(dev);
                        }
                }

                if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
                        macb_tx(bp);

                /*
                 * Link change detection isn't possible with RMII, so we'll
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */

                if (status & MACB_BIT(HRESP)) {
                        /*
                         * TODO: Reset the hardware, and maybe move the printk
                         * to a lower-priority context as well (work queue?)
                         */
                        printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
                               dev->name);
                }

                status = macb_readl(bp, ISR);
        }

        spin_unlock(&bp->lock);

        return IRQ_HANDLED;
}
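
/*
 * Queue one skb for transmission: map it for DMA, fill in the next
 * free TX descriptor and kick the transmitter.  The queue is stopped
 * when the ring is full and woken again from macb_tx().
 */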
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        dma_addr_t mapping;
        unsigned int len, entry;
        u32 ctrl;

#ifdef DEBUG
        int i;
        dev_dbg(&bp->pdev->dev,
                "start_xmit: len %u head %p data %p tail %p end %p\n",
                skb->len, skb->head, skb->data, skb->tail, skb->end);
        dev_dbg(&bp->pdev->dev,
                "data:");
        for (i = 0; i < 16; i++)
                printk(" %02x", (unsigned int)skb->data[i]);
        printk("\n");
#endif

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(bp) < 1) {
                netif_stop_queue(dev);
                spin_unlock_irq(&bp->lock);
                dev_err(&bp->pdev->dev,
                        "BUG! Tx Ring full when queue awake!\n");
                dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
                        bp->tx_head, bp->tx_tail);
                return 1;
        }

        entry = bp->tx_head;
        dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
        mapping = dma_map_single(&bp->pdev->dev, skb->data,
                                 len, DMA_TO_DEVICE);
        bp->tx_skb[entry].skb = skb;
        bp->tx_skb[entry].mapping = mapping;
        dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
                skb->data, (unsigned long)mapping);

        ctrl = MACB_BF(TX_FRMLEN, len);
        ctrl |= MACB_BIT(TX_LAST);
        if (entry == (TX_RING_SIZE - 1))
                ctrl |= MACB_BIT(TX_WRAP);

        bp->tx_ring[entry].addr = mapping;
        bp->tx_ring[entry].ctrl = ctrl;
        wmb();

        entry = NEXT_TX(entry);
        bp->tx_head = entry;

        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        spin_unlock_irq(&bp->lock);

        dev->trans_start = jiffies;

        return 0;
}

static void macb_free_consistent(struct macb *bp)
{
        if (bp->tx_skb) {
                kfree(bp->tx_skb);
                bp->tx_skb = NULL;
        }
        if (bp->rx_ring) {
                dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
                                  bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
        }
        if (bp->tx_ring) {
                dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
                                  bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
        }
        if (bp->rx_buffers) {
                dma_free_coherent(&bp->pdev->dev,
                                  RX_RING_SIZE * RX_BUFFER_SIZE,
                                  bp->rx_buffers, bp->rx_buffers_dma);
                bp->rx_buffers = NULL;
        }
}

static int macb_alloc_consistent(struct macb *bp)
{
        int size;

        size = TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_skb = kmalloc(size, GFP_KERNEL);
        if (!bp->tx_skb)
                goto out_err;

        size = RX_RING_BYTES;
        bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                         &bp->rx_ring_dma, GFP_KERNEL);
        if (!bp->rx_ring)
                goto out_err;
        dev_dbg(&bp->pdev->dev,
                "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
                size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

        size = TX_RING_BYTES;
        bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                         &bp->tx_ring_dma, GFP_KERNEL);
        if (!bp->tx_ring)
                goto out_err;
        dev_dbg(&bp->pdev->dev,
                "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
                size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

        size = RX_RING_SIZE * RX_BUFFER_SIZE;
        bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
                                            &bp->rx_buffers_dma, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;
        dev_dbg(&bp->pdev->dev,
                "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
                size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

        return 0;

out_err:
        macb_free_consistent(bp);
        return -ENOMEM;
}
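
/*
 * Point every RX descriptor at its slice of the contiguous RX buffer
 * area and mark every TX descriptor as used so the hardware has
 * nothing to transmit yet; the last descriptor of each ring gets the
 * WRAP bit.
 */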
static void macb_init_rings(struct macb *bp)
{
        int i;
        dma_addr_t addr;

        addr = bp->rx_buffers_dma;
        for (i = 0; i < RX_RING_SIZE; i++) {
                bp->rx_ring[i].addr = addr;
                bp->rx_ring[i].ctrl = 0;
                addr += RX_BUFFER_SIZE;
        }
        bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

        for (i = 0; i < TX_RING_SIZE; i++) {
                bp->tx_ring[i].addr = 0;
                bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
        }
        bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

        bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}

static void macb_reset_hw(struct macb *bp)
{
        /* Make sure we have the write buffer for ourselves */
        wmb();

        /*
         * Disable RX and TX (XXX: Should we halt the transmission
         * more gracefully?)
         */
        macb_writel(bp, NCR, 0);

        /* Clear the stats registers (XXX: Update stats first?) */
        macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

        /* Clear all status flags */
        macb_writel(bp, TSR, ~0UL);
        macb_writel(bp, RSR, ~0UL);

        /* Disable all interrupts */
        macb_writel(bp, IDR, ~0UL);
        macb_readl(bp, ISR);
}
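
/*
 * Bring the controller up: program the MAC address, configure pause
 * frames, FCS stripping and promiscuous mode, hand the descriptor
 * rings to the hardware and enable the RX/TX engines and their
 * interrupts.
 */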
static void macb_init_hw(struct macb *bp)
{
        u32 config;

        macb_reset_hw(bp);
        __macb_set_hwaddr(bp);

        config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
        config |= MACB_BIT(PAE);		/* PAuse Enable */
        config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
        if (bp->dev->flags & IFF_PROMISC)
                config |= MACB_BIT(CAF);	/* Copy All Frames */
        if (!(bp->dev->flags & IFF_BROADCAST))
                config |= MACB_BIT(NBC);	/* No BroadCast */
        macb_writel(bp, NCFGR, config);

        /* Initialize TX and RX buffers */
        macb_writel(bp, RBQP, bp->rx_ring_dma);
        macb_writel(bp, TBQP, bp->tx_ring_dma);

        /* Enable TX and RX */
        macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE));

        /* Enable interrupts */
        macb_writel(bp, IER, (MACB_BIT(RCOMP)
                              | MACB_BIT(RXUBR)
                              | MACB_BIT(ISR_TUND)
                              | MACB_BIT(ISR_RLE)
                              | MACB_BIT(TXERR)
                              | MACB_BIT(TCOMP)
                              | MACB_BIT(ISR_ROVR)
                              | MACB_BIT(HRESP)));
}

static void macb_init_phy(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);

        /* Set some reasonable default settings */
        macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE,
                        ADVERTISE_CSMA | ADVERTISE_ALL);
        macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR,
                        (BMCR_SPEED100 | BMCR_ANENABLE
                         | BMCR_ANRESTART | BMCR_FULLDPLX));
}

static int macb_open(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        int err;

        dev_dbg(&bp->pdev->dev, "open\n");

        if (!is_valid_ether_addr(dev->dev_addr))
                return -EADDRNOTAVAIL;

        err = macb_alloc_consistent(bp);
        if (err) {
                printk(KERN_ERR
                       "%s: Unable to allocate DMA memory (error %d)\n",
                       dev->name, err);
                return err;
        }

        macb_init_rings(bp);
        macb_init_hw(bp);
        macb_init_phy(dev);

        macb_check_media(bp, 1, 1);
        netif_start_queue(dev);

        schedule_delayed_work(&bp->periodic_task, HZ);

        return 0;
}

static int macb_close(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        unsigned long flags;

        cancel_rearming_delayed_work(&bp->periodic_task);

        netif_stop_queue(dev);

        spin_lock_irqsave(&bp->lock, flags);
        macb_reset_hw(bp);
        netif_carrier_off(dev);
        spin_unlock_irqrestore(&bp->lock, flags);

        macb_free_consistent(bp);

        return 0;
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
        struct macb_stats *hwstat = &bp->hw_stats;

        /* Convert HW stats into netdevice stats */
        nstat->rx_errors = (hwstat->rx_fcs_errors +
                            hwstat->rx_align_errors +
                            hwstat->rx_resource_errors +
                            hwstat->rx_overruns +
                            hwstat->rx_oversize_pkts +
                            hwstat->rx_jabbers +
                            hwstat->rx_undersize_pkts +
                            hwstat->sqe_test_errors +
                            hwstat->rx_length_mismatch);
        nstat->tx_errors = (hwstat->tx_late_cols +
                            hwstat->tx_excessive_cols +
                            hwstat->tx_underruns +
                            hwstat->tx_carrier_errors);
        nstat->collisions = (hwstat->tx_single_cols +
                             hwstat->tx_multiple_cols +
                             hwstat->tx_excessive_cols);
        nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                   hwstat->rx_jabbers +
                                   hwstat->rx_undersize_pkts +
                                   hwstat->rx_length_mismatch);
        nstat->rx_over_errors = hwstat->rx_resource_errors;
        nstat->rx_crc_errors = hwstat->rx_fcs_errors;
        nstat->rx_frame_errors = hwstat->rx_align_errors;
        nstat->rx_fifo_errors = hwstat->rx_overruns;
        /* XXX: What does "missed" mean? */
        nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
        nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
        nstat->tx_fifo_errors = hwstat->tx_underruns;
        /* Don't know about heartbeat or window errors... */

        return nstat;
}

static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct macb *bp = netdev_priv(dev);
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);
        ret = mii_ethtool_gset(&bp->mii, cmd);
        spin_unlock_irqrestore(&bp->lock, flags);

        return ret;
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct macb *bp = netdev_priv(dev);
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);
        ret = mii_ethtool_sset(&bp->mii, cmd);
        spin_unlock_irqrestore(&bp->lock, flags);

        return ret;
}

static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct macb *bp = netdev_priv(dev);

        strcpy(info->driver, bp->pdev->dev.driver->name);
        strcpy(info->version, "$Revision: 1.14 $");
        strcpy(info->bus_info, bp->pdev->dev.bus_id);
}

static int macb_nway_reset(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        return mii_nway_restart(&bp->mii);
}

static struct ethtool_ops macb_ethtool_ops = {
        .get_settings		= macb_get_settings,
        .set_settings		= macb_set_settings,
        .get_drvinfo		= macb_get_drvinfo,
        .nway_reset		= macb_nway_reset,
        .get_link		= ethtool_op_get_link,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct macb *bp = netdev_priv(dev);
        int ret;
        unsigned long flags;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&bp->lock, flags);
        ret = generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL);
        spin_unlock_irqrestore(&bp->lock, flags);

        return ret;
}

static ssize_t macb_mii_show(const struct device *_dev, char *buf,
                             unsigned long addr)
{
        struct net_device *dev = to_net_dev(_dev);
        struct macb *bp = netdev_priv(dev);
        ssize_t ret = -EINVAL;

        if (netif_running(dev)) {
                int value;
                value = macb_mdio_read(dev, bp->mii.phy_id, addr);
                ret = sprintf(buf, "0x%04x\n", (uint16_t)value);
        }

        return ret;
}

#define MII_ENTRY(name, addr)					\
static ssize_t show_##name(struct device *_dev,			\
                           struct device_attribute *attr,	\
                           char *buf)				\
{								\
        return macb_mii_show(_dev, buf, addr);			\
}								\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

MII_ENTRY(bmcr, MII_BMCR);
MII_ENTRY(bmsr, MII_BMSR);
MII_ENTRY(physid1, MII_PHYSID1);
MII_ENTRY(physid2, MII_PHYSID2);
MII_ENTRY(advertise, MII_ADVERTISE);
MII_ENTRY(lpa, MII_LPA);
MII_ENTRY(expansion, MII_EXPANSION);

static struct attribute *macb_mii_attrs[] = {
        &dev_attr_bmcr.attr,
        &dev_attr_bmsr.attr,
        &dev_attr_physid1.attr,
        &dev_attr_physid2.attr,
        &dev_attr_advertise.attr,
        &dev_attr_lpa.attr,
        &dev_attr_expansion.attr,
        NULL,
};

static struct attribute_group macb_mii_group = {
        .name	= "mii",
        .attrs	= macb_mii_attrs,
};

static void macb_unregister_sysfs(struct net_device *net)
{
        struct device *_dev = &net->dev;

        sysfs_remove_group(&_dev->kobj, &macb_mii_group);
}

static int macb_register_sysfs(struct net_device *net)
{
        struct device *_dev = &net->dev;
        int ret;

        ret = sysfs_create_group(&_dev->kobj, &macb_mii_group);
        if (ret)
                printk(KERN_WARNING
                       "%s: sysfs mii attribute registration failed: %d\n",
                       net->name, ret);
        return ret;
}
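
/*
 * Platform driver probe: map the MMIO resource, enable the bus
 * clock(s), request the interrupt, set up the MII management
 * interface, detect the PHY and register the net_device.
 */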
static int __devinit macb_probe(struct platform_device *pdev)
{
        struct eth_platform_data *pdata;
        struct resource *regs;
        struct net_device *dev;
        struct macb *bp;
        unsigned long pclk_hz;
        u32 config;
        int err = -ENXIO;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs) {
                dev_err(&pdev->dev, "no mmio resource defined\n");
                goto err_out;
        }

        err = -ENOMEM;
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev) {
                dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
                goto err_out;
        }

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* TODO: Actually, we have some interesting features... */
        dev->features |= 0;

        bp = netdev_priv(dev);
        bp->pdev = pdev;
        bp->dev = dev;

        spin_lock_init(&bp->lock);

#if defined(CONFIG_ARCH_AT91)
        bp->pclk = clk_get(&pdev->dev, "macb_clk");
        if (IS_ERR(bp->pclk)) {
                dev_err(&pdev->dev, "failed to get macb_clk\n");
                goto err_out_free_dev;
        }
        clk_enable(bp->pclk);
#else
        bp->pclk = clk_get(&pdev->dev, "pclk");
        if (IS_ERR(bp->pclk)) {
                dev_err(&pdev->dev, "failed to get pclk\n");
                goto err_out_free_dev;
        }
        bp->hclk = clk_get(&pdev->dev, "hclk");
        if (IS_ERR(bp->hclk)) {
                dev_err(&pdev->dev, "failed to get hclk\n");
                goto err_out_put_pclk;
        }

        clk_enable(bp->pclk);
        clk_enable(bp->hclk);
#endif

        bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
        if (!bp->regs) {
                dev_err(&pdev->dev, "failed to map registers, aborting.\n");
                err = -ENOMEM;
                goto err_out_disable_clocks;
        }

        dev->irq = platform_get_irq(pdev, 0);
        err = request_irq(dev->irq, macb_interrupt, SA_SAMPLE_RANDOM,
                          dev->name, dev);
        if (err) {
                printk(KERN_ERR
                       "%s: Unable to request IRQ %d (error %d)\n",
                       dev->name, dev->irq, err);
                goto err_out_iounmap;
        }

        dev->open = macb_open;
        dev->stop = macb_close;
        dev->hard_start_xmit = macb_start_xmit;
        dev->get_stats = macb_get_stats;
        dev->do_ioctl = macb_ioctl;
        dev->poll = macb_poll;
        dev->weight = 64;
        dev->ethtool_ops = &macb_ethtool_ops;

        dev->base_addr = regs->start;

        INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
        mutex_init(&bp->mdio_mutex);
        init_completion(&bp->mdio_complete);

        /* Set MII management clock divider */
        pclk_hz = clk_get_rate(bp->pclk);
        if (pclk_hz <= 20000000)
                config = MACB_BF(CLK, MACB_CLK_DIV8);
        else if (pclk_hz <= 40000000)
                config = MACB_BF(CLK, MACB_CLK_DIV16);
        else if (pclk_hz <= 80000000)
                config = MACB_BF(CLK, MACB_CLK_DIV32);
        else
                config = MACB_BF(CLK, MACB_CLK_DIV64);
        macb_writel(bp, NCFGR, config);

        bp->mii.dev = dev;
        bp->mii.mdio_read = macb_mdio_read;
        bp->mii.mdio_write = macb_mdio_write;
        bp->mii.phy_id_mask = 0x1f;
        bp->mii.reg_num_mask = 0x1f;

        macb_get_hwaddr(bp);
        err = macb_phy_probe(bp);
        if (err) {
                dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n");
                goto err_out_free_irq;
        }

        pdata = pdev->dev.platform_data;
        if (pdata && pdata->is_rmii)
#if defined(CONFIG_ARCH_AT91)
                macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)));
#else
                macb_writel(bp, USRIO, 0);
#endif
        else
#if defined(CONFIG_ARCH_AT91)
                macb_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
                macb_writel(bp, USRIO, MACB_BIT(MII));
#endif

        bp->tx_pending = DEF_TX_RING_PENDING;

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
                goto err_out_free_irq;
        }

        platform_set_drvdata(pdev, dev);

        macb_register_sysfs(dev);

        printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d "
               "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
               dev->name, dev->base_addr, dev->irq,
               dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
               dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

        return 0;

err_out_free_irq:
        free_irq(dev->irq, dev);
err_out_iounmap:
        iounmap(bp->regs);
err_out_disable_clocks:
#ifndef CONFIG_ARCH_AT91
        clk_disable(bp->hclk);
        clk_put(bp->hclk);
#endif
        clk_disable(bp->pclk);
err_out_put_pclk:
        clk_put(bp->pclk);
err_out_free_dev:
        free_netdev(dev);
err_out:
        platform_set_drvdata(pdev, NULL);
        return err;
}

static int __devexit macb_remove(struct platform_device *pdev)
{
        struct net_device *dev;
        struct macb *bp;

        dev = platform_get_drvdata(pdev);

        if (dev) {
                bp = netdev_priv(dev);
                macb_unregister_sysfs(dev);
                unregister_netdev(dev);
                free_irq(dev->irq, dev);
                iounmap(bp->regs);
#ifndef CONFIG_ARCH_AT91
                clk_disable(bp->hclk);
                clk_put(bp->hclk);
#endif
                clk_disable(bp->pclk);
                clk_put(bp->pclk);
                free_netdev(dev);
                platform_set_drvdata(pdev, NULL);
        }

        return 0;
}

static struct platform_driver macb_driver = {
        .probe		= macb_probe,
        .remove		= __devexit_p(macb_remove),
        .driver		= {
                .name		= "macb",
        },
};

static int __init macb_init(void)
{
        return platform_driver_register(&macb_driver);
}

static void __exit macb_exit(void)
{
        platform_driver_unregister(&macb_driver);
}

module_init(macb_init);
module_exit(macb_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");