| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1 | /* | 
 | 2 |  * Atmel MACB Ethernet Controller driver | 
 | 3 |  * | 
 | 4 |  * Copyright (C) 2004-2006 Atmel Corporation | 
 | 5 |  * | 
 | 6 |  * This program is free software; you can redistribute it and/or modify | 
 | 7 |  * it under the terms of the GNU General Public License version 2 as | 
 | 8 |  * published by the Free Software Foundation. | 
 | 9 |  */ | 
 | 10 |  | 
 | 11 | #include <linux/clk.h> | 
 | 12 | #include <linux/module.h> | 
 | 13 | #include <linux/moduleparam.h> | 
 | 14 | #include <linux/kernel.h> | 
 | 15 | #include <linux/types.h> | 
 | 16 | #include <linux/slab.h> | 
 | 17 | #include <linux/init.h> | 
 | 18 | #include <linux/netdevice.h> | 
 | 19 | #include <linux/etherdevice.h> | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 20 | #include <linux/dma-mapping.h> | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 21 | #include <linux/platform_device.h> | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 22 | #include <linux/phy.h> | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 23 |  | 
| Russell King | a09e64f | 2008-08-05 16:14:15 +0100 | [diff] [blame] | 24 | #include <mach/board.h> | 
 | 25 | #include <mach/cpu.h> | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 26 |  | 
 | 27 | #include "macb.h" | 
 | 28 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 29 | #define RX_BUFFER_SIZE		128 | 
 | 30 | #define RX_RING_SIZE		512 | 
 | 31 | #define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE) | 
 | 32 |  | 
 | 33 | /* Make the IP header word-aligned (the ethernet header is 14 bytes) */ | 
 | 34 | #define RX_OFFSET		2 | 
 | 35 |  | 
 | 36 | #define TX_RING_SIZE		128 | 
 | 37 | #define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1) | 
 | 38 | #define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE) | 
 | 39 |  | 
 | 40 | #define TX_RING_GAP(bp)						\ | 
 | 41 | 	(TX_RING_SIZE - (bp)->tx_pending) | 
 | 42 | #define TX_BUFFS_AVAIL(bp)					\ | 
 | 43 | 	(((bp)->tx_tail <= (bp)->tx_head) ?			\ | 
 | 44 | 	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\ | 
 | 45 | 	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp)) | 
 | 46 | #define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1)) | 
 | 47 |  | 
 | 48 | #define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1)) | 
 | 49 |  | 
 | 50 | /* minimum number of free TX descriptors before waking up TX process */ | 
 | 51 | #define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4) | 
 | 52 |  | 
 | 53 | #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\ | 
 | 54 | 				 | MACB_BIT(ISR_ROVR)) | 
 | 55 |  | 
/*
 * Program dev->dev_addr into the hardware's specific-address-1 pair:
 * SA1B takes the low 4 bytes, SA1T the top 2.
 *
 * NOTE(review): the cpu_to_le32()/cpu_to_le16() conversions assume the
 * MAC expects the address registers in little-endian byte order
 * regardless of CPU endianness -- confirm against the MACB datasheet.
 * The casts also assume dev_addr is at least 2-byte aligned.
 */
static void __macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_writel(bp, SA1T, top);
}
 | 66 |  | 
 | 67 | static void __init macb_get_hwaddr(struct macb *bp) | 
 | 68 | { | 
 | 69 | 	u32 bottom; | 
 | 70 | 	u16 top; | 
 | 71 | 	u8 addr[6]; | 
 | 72 |  | 
 | 73 | 	bottom = macb_readl(bp, SA1B); | 
 | 74 | 	top = macb_readl(bp, SA1T); | 
 | 75 |  | 
 | 76 | 	addr[0] = bottom & 0xff; | 
 | 77 | 	addr[1] = (bottom >> 8) & 0xff; | 
 | 78 | 	addr[2] = (bottom >> 16) & 0xff; | 
 | 79 | 	addr[3] = (bottom >> 24) & 0xff; | 
 | 80 | 	addr[4] = top & 0xff; | 
 | 81 | 	addr[5] = (top >> 8) & 0xff; | 
 | 82 |  | 
| Sven Schnelle | d1d5741 | 2008-06-09 16:33:57 -0700 | [diff] [blame] | 83 | 	if (is_valid_ether_addr(addr)) { | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 84 | 		memcpy(bp->dev->dev_addr, addr, sizeof(addr)); | 
| Sven Schnelle | d1d5741 | 2008-06-09 16:33:57 -0700 | [diff] [blame] | 85 | 	} else { | 
 | 86 | 		dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); | 
 | 87 | 		random_ether_addr(bp->dev->dev_addr); | 
 | 88 | 	} | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 89 | } | 
 | 90 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 91 | static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 92 | { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 93 | 	struct macb *bp = bus->priv; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 94 | 	int value; | 
 | 95 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 96 | 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | 
 | 97 | 			      | MACB_BF(RW, MACB_MAN_READ) | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 98 | 			      | MACB_BF(PHYA, mii_id) | 
 | 99 | 			      | MACB_BF(REGA, regnum) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 100 | 			      | MACB_BF(CODE, MACB_MAN_CODE))); | 
 | 101 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 102 | 	/* wait for end of transfer */ | 
 | 103 | 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) | 
 | 104 | 		cpu_relax(); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 105 |  | 
 | 106 | 	value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 107 |  | 
 | 108 | 	return value; | 
 | 109 | } | 
 | 110 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 111 | static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | 
 | 112 | 			   u16 value) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 113 | { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 114 | 	struct macb *bp = bus->priv; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 115 |  | 
 | 116 | 	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | 
 | 117 | 			      | MACB_BF(RW, MACB_MAN_WRITE) | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 118 | 			      | MACB_BF(PHYA, mii_id) | 
 | 119 | 			      | MACB_BF(REGA, regnum) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 120 | 			      | MACB_BF(CODE, MACB_MAN_CODE) | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 121 | 			      | MACB_BF(DATA, value))); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 122 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 123 | 	/* wait for end of transfer */ | 
 | 124 | 	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) | 
 | 125 | 		cpu_relax(); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 126 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 127 | 	return 0; | 
 | 128 | } | 
 | 129 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 130 | static int macb_mdio_reset(struct mii_bus *bus) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 131 | { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 132 | 	return 0; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 133 | } | 
 | 134 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 135 | static void macb_handle_link_change(struct net_device *dev) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 136 | { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 137 | 	struct macb *bp = netdev_priv(dev); | 
 | 138 | 	struct phy_device *phydev = bp->phy_dev; | 
 | 139 | 	unsigned long flags; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 140 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 141 | 	int status_change = 0; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 142 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 143 | 	spin_lock_irqsave(&bp->lock, flags); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 144 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 145 | 	if (phydev->link) { | 
 | 146 | 		if ((bp->speed != phydev->speed) || | 
 | 147 | 		    (bp->duplex != phydev->duplex)) { | 
 | 148 | 			u32 reg; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 149 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 150 | 			reg = macb_readl(bp, NCFGR); | 
 | 151 | 			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); | 
 | 152 |  | 
 | 153 | 			if (phydev->duplex) | 
 | 154 | 				reg |= MACB_BIT(FD); | 
| Atsushi Nemoto | 179956f | 2008-02-21 22:50:54 +0900 | [diff] [blame] | 155 | 			if (phydev->speed == SPEED_100) | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 156 | 				reg |= MACB_BIT(SPD); | 
 | 157 |  | 
 | 158 | 			macb_writel(bp, NCFGR, reg); | 
 | 159 |  | 
 | 160 | 			bp->speed = phydev->speed; | 
 | 161 | 			bp->duplex = phydev->duplex; | 
 | 162 | 			status_change = 1; | 
 | 163 | 		} | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 164 | 	} | 
 | 165 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 166 | 	if (phydev->link != bp->link) { | 
| Anton Vorontsov | c8f1568 | 2008-07-22 15:41:24 -0700 | [diff] [blame] | 167 | 		if (!phydev->link) { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 168 | 			bp->speed = 0; | 
 | 169 | 			bp->duplex = -1; | 
 | 170 | 		} | 
 | 171 | 		bp->link = phydev->link; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 172 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 173 | 		status_change = 1; | 
 | 174 | 	} | 
 | 175 |  | 
 | 176 | 	spin_unlock_irqrestore(&bp->lock, flags); | 
 | 177 |  | 
 | 178 | 	if (status_change) { | 
 | 179 | 		if (phydev->link) | 
 | 180 | 			printk(KERN_INFO "%s: link up (%d/%s)\n", | 
 | 181 | 			       dev->name, phydev->speed, | 
 | 182 | 			       DUPLEX_FULL == phydev->duplex ? "Full":"Half"); | 
 | 183 | 		else | 
 | 184 | 			printk(KERN_INFO "%s: link down\n", dev->name); | 
 | 185 | 	} | 
 | 186 | } | 
 | 187 |  | 
 | 188 | /* based on au1000_eth. c*/ | 
 | 189 | static int macb_mii_probe(struct net_device *dev) | 
 | 190 | { | 
 | 191 | 	struct macb *bp = netdev_priv(dev); | 
| Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 192 | 	struct phy_device *phydev; | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 193 | 	struct eth_platform_data *pdata; | 
| Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 194 | 	int ret; | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 195 |  | 
| Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 196 | 	phydev = phy_find_first(bp->mii_bus); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 197 | 	if (!phydev) { | 
 | 198 | 		printk (KERN_ERR "%s: no PHY found\n", dev->name); | 
 | 199 | 		return -1; | 
 | 200 | 	} | 
 | 201 |  | 
 | 202 | 	pdata = bp->pdev->dev.platform_data; | 
 | 203 | 	/* TODO : add pin_irq */ | 
 | 204 |  | 
 | 205 | 	/* attach the mac to the phy */ | 
| Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 206 | 	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0, | 
 | 207 | 				 pdata && pdata->is_rmii ? | 
 | 208 | 				 PHY_INTERFACE_MODE_RMII : | 
 | 209 | 				 PHY_INTERFACE_MODE_MII); | 
 | 210 | 	if (ret) { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 211 | 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 
| Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 212 | 		return ret; | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 213 | 	} | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 214 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 215 | 	/* mask with MAC supported features */ | 
 | 216 | 	phydev->supported &= PHY_BASIC_FEATURES; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 217 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 218 | 	phydev->advertising = phydev->supported; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 219 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 220 | 	bp->link = 0; | 
 | 221 | 	bp->speed = 0; | 
 | 222 | 	bp->duplex = -1; | 
 | 223 | 	bp->phy_dev = phydev; | 
 | 224 |  | 
 | 225 | 	return 0; | 
 | 226 | } | 
 | 227 |  | 
 | 228 | static int macb_mii_init(struct macb *bp) | 
 | 229 | { | 
 | 230 | 	struct eth_platform_data *pdata; | 
 | 231 | 	int err = -ENXIO, i; | 
 | 232 |  | 
| Uwe Kleine-Koenig | 3dbda77 | 2009-07-23 08:31:31 +0200 | [diff] [blame] | 233 | 	/* Enable management port */ | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 234 | 	macb_writel(bp, NCR, MACB_BIT(MPE)); | 
 | 235 |  | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 236 | 	bp->mii_bus = mdiobus_alloc(); | 
 | 237 | 	if (bp->mii_bus == NULL) { | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 238 | 		err = -ENOMEM; | 
 | 239 | 		goto err_out; | 
 | 240 | 	} | 
 | 241 |  | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 242 | 	bp->mii_bus->name = "MACB_mii_bus"; | 
 | 243 | 	bp->mii_bus->read = &macb_mdio_read; | 
 | 244 | 	bp->mii_bus->write = &macb_mdio_write; | 
 | 245 | 	bp->mii_bus->reset = &macb_mdio_reset; | 
 | 246 | 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id); | 
 | 247 | 	bp->mii_bus->priv = bp; | 
 | 248 | 	bp->mii_bus->parent = &bp->dev->dev; | 
 | 249 | 	pdata = bp->pdev->dev.platform_data; | 
 | 250 |  | 
 | 251 | 	if (pdata) | 
 | 252 | 		bp->mii_bus->phy_mask = pdata->phy_mask; | 
 | 253 |  | 
 | 254 | 	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | 
 | 255 | 	if (!bp->mii_bus->irq) { | 
 | 256 | 		err = -ENOMEM; | 
 | 257 | 		goto err_out_free_mdiobus; | 
 | 258 | 	} | 
 | 259 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 260 | 	for (i = 0; i < PHY_MAX_ADDR; i++) | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 261 | 		bp->mii_bus->irq[i] = PHY_POLL; | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 262 |  | 
| Jamie Iles | 9152394 | 2011-02-28 04:05:25 +0000 | [diff] [blame] | 263 | 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 264 |  | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 265 | 	if (mdiobus_register(bp->mii_bus)) | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 266 | 		goto err_out_free_mdio_irq; | 
 | 267 |  | 
 | 268 | 	if (macb_mii_probe(bp->dev) != 0) { | 
 | 269 | 		goto err_out_unregister_bus; | 
 | 270 | 	} | 
 | 271 |  | 
 | 272 | 	return 0; | 
 | 273 |  | 
 | 274 | err_out_unregister_bus: | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 275 | 	mdiobus_unregister(bp->mii_bus); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 276 | err_out_free_mdio_irq: | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 277 | 	kfree(bp->mii_bus->irq); | 
 | 278 | err_out_free_mdiobus: | 
 | 279 | 	mdiobus_free(bp->mii_bus); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 280 | err_out: | 
 | 281 | 	return err; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 282 | } | 
 | 283 |  | 
/*
 * Accumulate the MAC's read-to-clear hardware statistics counters into
 * bp->hw_stats.  Walks the register file from MACB_PFR to MACB_TPF in
 * lock-step with the corresponding u32 fields of hw_stats, so the struct
 * layout must exactly mirror the register layout -- the WARN_ON checks
 * that invariant at runtime.
 */
static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.rx_pause_frames;
	u32 *end = &bp->hw_stats.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	/* __raw_readl: no byte-swapping or barriers needed for these
	 * native-endian counter registers. */
	for(; p < end; p++, reg++)
		*p += __raw_readl(reg);
}
 | 295 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 296 | static void macb_tx(struct macb *bp) | 
 | 297 | { | 
 | 298 | 	unsigned int tail; | 
 | 299 | 	unsigned int head; | 
 | 300 | 	u32 status; | 
 | 301 |  | 
 | 302 | 	status = macb_readl(bp, TSR); | 
 | 303 | 	macb_writel(bp, TSR, status); | 
 | 304 |  | 
 | 305 | 	dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", | 
 | 306 | 		(unsigned long)status); | 
 | 307 |  | 
| Erik Waling | ee33c58 | 2009-04-15 23:32:10 +0000 | [diff] [blame] | 308 | 	if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { | 
| Gregory CLEMENT | bdcba151 | 2007-12-19 18:23:44 +0100 | [diff] [blame] | 309 | 		int i; | 
| Erik Waling | ee33c58 | 2009-04-15 23:32:10 +0000 | [diff] [blame] | 310 | 		printk(KERN_ERR "%s: TX %s, resetting buffers\n", | 
 | 311 | 			bp->dev->name, status & MACB_BIT(UND) ? | 
 | 312 | 			"underrun" : "retry limit exceeded"); | 
| Gregory CLEMENT | bdcba151 | 2007-12-19 18:23:44 +0100 | [diff] [blame] | 313 |  | 
| Richard Röjfors | 39eddb4 | 2009-01-18 21:57:35 -0800 | [diff] [blame] | 314 | 		/* Transfer ongoing, disable transmitter, to avoid confusion */ | 
 | 315 | 		if (status & MACB_BIT(TGO)) | 
 | 316 | 			macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE)); | 
 | 317 |  | 
| Gregory CLEMENT | bdcba151 | 2007-12-19 18:23:44 +0100 | [diff] [blame] | 318 | 		head = bp->tx_head; | 
 | 319 |  | 
 | 320 | 		/*Mark all the buffer as used to avoid sending a lost buffer*/ | 
 | 321 | 		for (i = 0; i < TX_RING_SIZE; i++) | 
 | 322 | 			bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); | 
 | 323 |  | 
 | 324 | 		/* free transmit buffer in upper layer*/ | 
 | 325 | 		for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { | 
 | 326 | 			struct ring_info *rp = &bp->tx_skb[tail]; | 
 | 327 | 			struct sk_buff *skb = rp->skb; | 
 | 328 |  | 
 | 329 | 			BUG_ON(skb == NULL); | 
 | 330 |  | 
 | 331 | 			rmb(); | 
 | 332 |  | 
 | 333 | 			dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, | 
 | 334 | 							 DMA_TO_DEVICE); | 
 | 335 | 			rp->skb = NULL; | 
 | 336 | 			dev_kfree_skb_irq(skb); | 
 | 337 | 		} | 
 | 338 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 339 | 		bp->tx_head = bp->tx_tail = 0; | 
| Richard Röjfors | 39eddb4 | 2009-01-18 21:57:35 -0800 | [diff] [blame] | 340 |  | 
 | 341 | 		/* Enable the transmitter again */ | 
 | 342 | 		if (status & MACB_BIT(TGO)) | 
 | 343 | 			macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 344 | 	} | 
 | 345 |  | 
 | 346 | 	if (!(status & MACB_BIT(COMP))) | 
 | 347 | 		/* | 
 | 348 | 		 * This may happen when a buffer becomes complete | 
 | 349 | 		 * between reading the ISR and scanning the | 
 | 350 | 		 * descriptors.  Nothing to worry about. | 
 | 351 | 		 */ | 
 | 352 | 		return; | 
 | 353 |  | 
 | 354 | 	head = bp->tx_head; | 
 | 355 | 	for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { | 
 | 356 | 		struct ring_info *rp = &bp->tx_skb[tail]; | 
 | 357 | 		struct sk_buff *skb = rp->skb; | 
 | 358 | 		u32 bufstat; | 
 | 359 |  | 
 | 360 | 		BUG_ON(skb == NULL); | 
 | 361 |  | 
 | 362 | 		rmb(); | 
 | 363 | 		bufstat = bp->tx_ring[tail].ctrl; | 
 | 364 |  | 
 | 365 | 		if (!(bufstat & MACB_BIT(TX_USED))) | 
 | 366 | 			break; | 
 | 367 |  | 
 | 368 | 		dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", | 
 | 369 | 			tail, skb->data); | 
 | 370 | 		dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, | 
 | 371 | 				 DMA_TO_DEVICE); | 
 | 372 | 		bp->stats.tx_packets++; | 
 | 373 | 		bp->stats.tx_bytes += skb->len; | 
 | 374 | 		rp->skb = NULL; | 
 | 375 | 		dev_kfree_skb_irq(skb); | 
 | 376 | 	} | 
 | 377 |  | 
 | 378 | 	bp->tx_tail = tail; | 
 | 379 | 	if (netif_queue_stopped(bp->dev) && | 
 | 380 | 	    TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) | 
 | 381 | 		netif_wake_queue(bp->dev); | 
 | 382 | } | 
 | 383 |  | 
/*
 * Copy one received frame, spread over the descriptors first_frag..
 * last_frag (inclusive, ring-ordered), into a freshly allocated skb and
 * hand it to the stack.
 *
 * Each descriptor's RX_USED bit is cleared as its data is consumed so
 * the hardware can refill it.  Returns 0 on success, 1 if the skb
 * allocation failed and the frame was dropped.
 */
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset = 0;
	struct sk_buff *skb;

	/* Total frame length lives in the ctrl word of the EOF descriptor. */
	len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);

	dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		first_frag, last_frag, len);

	skb = dev_alloc_skb(len + RX_OFFSET);
	if (!skb) {
		/* Out of memory: give the descriptors straight back to the
		 * hardware and count the frame as dropped. */
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag = NEXT_RX(frag)) {
			bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}
		/* Make the cleared USED bits visible before returning. */
		wmb();
		return 1;
	}

	/* Reserve RX_OFFSET bytes so the IP header ends up word-aligned. */
	skb_reserve(skb, RX_OFFSET);
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag = NEXT_RX(frag)) {
		unsigned int frag_len = RX_BUFFER_SIZE;

		/* Last fragment usually carries less than a full buffer. */
		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       (bp->rx_buffers +
					        (RX_BUFFER_SIZE * frag)),
					       frag_len);
		offset += RX_BUFFER_SIZE;
		/* Data copied out -- return this descriptor to the MAC. */
		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
		wmb();

		if (frag == last_frag)
			break;
	}

	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += len;
	dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
		skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
 | 442 |  | 
 | 443 | /* Mark DMA descriptors from begin up to and not including end as unused */ | 
 | 444 | static void discard_partial_frame(struct macb *bp, unsigned int begin, | 
 | 445 | 				  unsigned int end) | 
 | 446 | { | 
 | 447 | 	unsigned int frag; | 
 | 448 |  | 
 | 449 | 	for (frag = begin; frag != end; frag = NEXT_RX(frag)) | 
 | 450 | 		bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | 
 | 451 | 	wmb(); | 
 | 452 |  | 
 | 453 | 	/* | 
 | 454 | 	 * When this happens, the hardware stats registers for | 
 | 455 | 	 * whatever caused this is updated, so we don't have to record | 
 | 456 | 	 * anything. | 
 | 457 | 	 */ | 
 | 458 | } | 
 | 459 |  | 
/*
 * NAPI receive loop: scan the RX ring from bp->rx_tail, reassembling
 * frames delimited by the RX_SOF/RX_EOF descriptor bits, and deliver up
 * to @budget of them via macb_rx_frame().
 *
 * Returns the number of frames actually delivered.  If the scan ends in
 * the middle of a frame, rx_tail is left at that frame's first fragment
 * so the next poll re-examines it from the start.
 */
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail = bp->rx_tail;
	int first_frag = -1;

	for (; budget > 0; tail = NEXT_RX(tail)) {
		u32 addr, ctrl;

		/* See the hardware's descriptor updates before reading them. */
		rmb();
		addr = bp->rx_ring[tail].addr;
		ctrl = bp->rx_ring[tail].ctrl;

		/* Descriptor still owned by the hardware: ring is drained. */
		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			/* A new frame starting while another was open means
			 * the previous one was truncated -- discard it. */
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			/* Dropped frames don't count against the budget. */
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
 | 502 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 503 | static int macb_poll(struct napi_struct *napi, int budget) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 504 | { | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 505 | 	struct macb *bp = container_of(napi, struct macb, napi); | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 506 | 	int work_done; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 507 | 	u32 status; | 
 | 508 |  | 
 | 509 | 	status = macb_readl(bp, RSR); | 
 | 510 | 	macb_writel(bp, RSR, status); | 
 | 511 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 512 | 	work_done = 0; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 513 |  | 
 | 514 | 	dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 515 | 		(unsigned long)status, budget); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 516 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 517 | 	work_done = macb_rx(bp, budget); | 
| Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 518 | 	if (work_done < budget) { | 
| Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 519 | 		napi_complete(napi); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 520 |  | 
| Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 521 | 		/* | 
 | 522 | 		 * We've done what we can to clean the buffers. Make sure we | 
 | 523 | 		 * get notified when new packets arrive. | 
 | 524 | 		 */ | 
 | 525 | 		macb_writel(bp, IER, MACB_RX_INT_FLAGS); | 
 | 526 | 	} | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 527 |  | 
 | 528 | 	/* TODO: Handle errors */ | 
 | 529 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 530 | 	return work_done; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 531 | } | 
 | 532 |  | 
/*
 * Top-half interrupt handler.  Loops re-reading ISR until it reads back
 * zero, since new events may arrive while earlier ones are being
 * serviced.  RX work is deferred to NAPI; TX completion and error
 * bookkeeping are handled inline under bp->lock.
 */
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	/* NOTE(review): this assumes reading ISR clears it (read-to-clear);
	 * confirm against the MACB datasheet. */
	status = macb_readl(bp, ISR);

	/* Shared-IRQ etiquette: not ours if no bits are set. */
	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			/* Mask everything; the device is going down. */
			macb_writel(bp, IDR, ~0UL);
			break;
		}

		if (status & MACB_RX_INT_FLAGS) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);

			if (napi_schedule_prep(&bp->napi)) {
				dev_dbg(&bp->pdev->dev,
					"scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		/* TX completion, underrun and retry-limit errors are all
		 * handled by macb_tx(). */
		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
			    MACB_BIT(ISR_RLE)))
			macb_tx(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			bp->hw_stats.rx_overruns++;
		}

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the printk
			 * to a lower-priority context as well (work queue?)
			 */
			printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
			       dev->name);
		}

		/* Pick up any events that arrived while we were working. */
		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
 | 600 |  | 
| Thomas Petazzoni | 6e8cf5c | 2009-05-04 11:08:41 -0700 | [diff] [blame] | 601 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
 | 602 | /* | 
 | 603 |  * Polling receive - used by netconsole and other diagnostic tools | 
 | 604 |  * to allow network i/o with interrupts disabled. | 
 | 605 |  */ | 
 | 606 | static void macb_poll_controller(struct net_device *dev) | 
 | 607 | { | 
 | 608 | 	unsigned long flags; | 
 | 609 |  | 
 | 610 | 	local_irq_save(flags); | 
 | 611 | 	macb_interrupt(dev->irq, dev); | 
 | 612 | 	local_irq_restore(flags); | 
 | 613 | } | 
 | 614 | #endif | 
 | 615 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 616 | static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | 
 | 617 | { | 
 | 618 | 	struct macb *bp = netdev_priv(dev); | 
 | 619 | 	dma_addr_t mapping; | 
 | 620 | 	unsigned int len, entry; | 
 | 621 | 	u32 ctrl; | 
| Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 622 | 	unsigned long flags; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 623 |  | 
 | 624 | #ifdef DEBUG | 
 | 625 | 	int i; | 
 | 626 | 	dev_dbg(&bp->pdev->dev, | 
 | 627 | 		"start_xmit: len %u head %p data %p tail %p end %p\n", | 
| Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 628 | 		skb->len, skb->head, skb->data, | 
| Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 629 | 		skb_tail_pointer(skb), skb_end_pointer(skb)); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 630 | 	dev_dbg(&bp->pdev->dev, | 
 | 631 | 		"data:"); | 
 | 632 | 	for (i = 0; i < 16; i++) | 
 | 633 | 		printk(" %02x", (unsigned int)skb->data[i]); | 
 | 634 | 	printk("\n"); | 
 | 635 | #endif | 
 | 636 |  | 
 | 637 | 	len = skb->len; | 
| Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 638 | 	spin_lock_irqsave(&bp->lock, flags); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 639 |  | 
 | 640 | 	/* This is a hard error, log it. */ | 
 | 641 | 	if (TX_BUFFS_AVAIL(bp) < 1) { | 
 | 642 | 		netif_stop_queue(dev); | 
| Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 643 | 		spin_unlock_irqrestore(&bp->lock, flags); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 644 | 		dev_err(&bp->pdev->dev, | 
 | 645 | 			"BUG! Tx Ring full when queue awake!\n"); | 
 | 646 | 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", | 
 | 647 | 			bp->tx_head, bp->tx_tail); | 
| Patrick McHardy | 5b54814 | 2009-06-12 06:22:29 +0000 | [diff] [blame] | 648 | 		return NETDEV_TX_BUSY; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 649 | 	} | 
 | 650 |  | 
 | 651 | 	entry = bp->tx_head; | 
 | 652 | 	dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry); | 
 | 653 | 	mapping = dma_map_single(&bp->pdev->dev, skb->data, | 
 | 654 | 				 len, DMA_TO_DEVICE); | 
 | 655 | 	bp->tx_skb[entry].skb = skb; | 
 | 656 | 	bp->tx_skb[entry].mapping = mapping; | 
 | 657 | 	dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n", | 
 | 658 | 		skb->data, (unsigned long)mapping); | 
 | 659 |  | 
 | 660 | 	ctrl = MACB_BF(TX_FRMLEN, len); | 
 | 661 | 	ctrl |= MACB_BIT(TX_LAST); | 
 | 662 | 	if (entry == (TX_RING_SIZE - 1)) | 
 | 663 | 		ctrl |= MACB_BIT(TX_WRAP); | 
 | 664 |  | 
 | 665 | 	bp->tx_ring[entry].addr = mapping; | 
 | 666 | 	bp->tx_ring[entry].ctrl = ctrl; | 
 | 667 | 	wmb(); | 
 | 668 |  | 
 | 669 | 	entry = NEXT_TX(entry); | 
 | 670 | 	bp->tx_head = entry; | 
 | 671 |  | 
 | 672 | 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); | 
 | 673 |  | 
 | 674 | 	if (TX_BUFFS_AVAIL(bp) < 1) | 
 | 675 | 		netif_stop_queue(dev); | 
 | 676 |  | 
| Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 677 | 	spin_unlock_irqrestore(&bp->lock, flags); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 678 |  | 
| Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 679 | 	return NETDEV_TX_OK; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 680 | } | 
 | 681 |  | 
 | 682 | static void macb_free_consistent(struct macb *bp) | 
 | 683 | { | 
 | 684 | 	if (bp->tx_skb) { | 
 | 685 | 		kfree(bp->tx_skb); | 
 | 686 | 		bp->tx_skb = NULL; | 
 | 687 | 	} | 
 | 688 | 	if (bp->rx_ring) { | 
 | 689 | 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, | 
 | 690 | 				  bp->rx_ring, bp->rx_ring_dma); | 
 | 691 | 		bp->rx_ring = NULL; | 
 | 692 | 	} | 
 | 693 | 	if (bp->tx_ring) { | 
 | 694 | 		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, | 
 | 695 | 				  bp->tx_ring, bp->tx_ring_dma); | 
 | 696 | 		bp->tx_ring = NULL; | 
 | 697 | 	} | 
 | 698 | 	if (bp->rx_buffers) { | 
 | 699 | 		dma_free_coherent(&bp->pdev->dev, | 
 | 700 | 				  RX_RING_SIZE * RX_BUFFER_SIZE, | 
 | 701 | 				  bp->rx_buffers, bp->rx_buffers_dma); | 
 | 702 | 		bp->rx_buffers = NULL; | 
 | 703 | 	} | 
 | 704 | } | 
 | 705 |  | 
 | 706 | static int macb_alloc_consistent(struct macb *bp) | 
 | 707 | { | 
 | 708 | 	int size; | 
 | 709 |  | 
 | 710 | 	size = TX_RING_SIZE * sizeof(struct ring_info); | 
 | 711 | 	bp->tx_skb = kmalloc(size, GFP_KERNEL); | 
 | 712 | 	if (!bp->tx_skb) | 
 | 713 | 		goto out_err; | 
 | 714 |  | 
 | 715 | 	size = RX_RING_BYTES; | 
 | 716 | 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 
 | 717 | 					 &bp->rx_ring_dma, GFP_KERNEL); | 
 | 718 | 	if (!bp->rx_ring) | 
 | 719 | 		goto out_err; | 
 | 720 | 	dev_dbg(&bp->pdev->dev, | 
 | 721 | 		"Allocated RX ring of %d bytes at %08lx (mapped %p)\n", | 
 | 722 | 		size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); | 
 | 723 |  | 
 | 724 | 	size = TX_RING_BYTES; | 
 | 725 | 	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 
 | 726 | 					 &bp->tx_ring_dma, GFP_KERNEL); | 
 | 727 | 	if (!bp->tx_ring) | 
 | 728 | 		goto out_err; | 
 | 729 | 	dev_dbg(&bp->pdev->dev, | 
 | 730 | 		"Allocated TX ring of %d bytes at %08lx (mapped %p)\n", | 
 | 731 | 		size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); | 
 | 732 |  | 
 | 733 | 	size = RX_RING_SIZE * RX_BUFFER_SIZE; | 
 | 734 | 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, | 
 | 735 | 					    &bp->rx_buffers_dma, GFP_KERNEL); | 
 | 736 | 	if (!bp->rx_buffers) | 
 | 737 | 		goto out_err; | 
 | 738 | 	dev_dbg(&bp->pdev->dev, | 
 | 739 | 		"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", | 
 | 740 | 		size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); | 
 | 741 |  | 
 | 742 | 	return 0; | 
 | 743 |  | 
 | 744 | out_err: | 
 | 745 | 	macb_free_consistent(bp); | 
 | 746 | 	return -ENOMEM; | 
 | 747 | } | 
 | 748 |  | 
 | 749 | static void macb_init_rings(struct macb *bp) | 
 | 750 | { | 
 | 751 | 	int i; | 
 | 752 | 	dma_addr_t addr; | 
 | 753 |  | 
 | 754 | 	addr = bp->rx_buffers_dma; | 
 | 755 | 	for (i = 0; i < RX_RING_SIZE; i++) { | 
 | 756 | 		bp->rx_ring[i].addr = addr; | 
 | 757 | 		bp->rx_ring[i].ctrl = 0; | 
 | 758 | 		addr += RX_BUFFER_SIZE; | 
 | 759 | 	} | 
 | 760 | 	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); | 
 | 761 |  | 
 | 762 | 	for (i = 0; i < TX_RING_SIZE; i++) { | 
 | 763 | 		bp->tx_ring[i].addr = 0; | 
 | 764 | 		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); | 
 | 765 | 	} | 
 | 766 | 	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); | 
 | 767 |  | 
 | 768 | 	bp->rx_tail = bp->tx_head = bp->tx_tail = 0; | 
 | 769 | } | 
 | 770 |  | 
/*
 * Put the controller into a quiescent, known state: receiver and
 * transmitter disabled, statistics counters cleared, all status
 * flags acknowledged and every interrupt source masked.
 */
static void macb_reset_hw(struct macb *bp)
{
	/* Make sure we have the write buffer for ourselves */
	wmb();

	/*
	 * Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, ~0UL);
	macb_writel(bp, RSR, ~0UL);

	/* Disable all interrupts */
	macb_writel(bp, IDR, ~0UL);
	/* Read ISR to flush any latched interrupt status */
	macb_readl(bp, ISR);
}
 | 793 |  | 
/*
 * Full hardware (re)initialization for an interface coming up:
 * reset, program the MAC address, configure NCFGR, point the
 * controller at the descriptor rings, enable RX/TX and unmask the
 * interrupt sources the driver handles.  The rings must already be
 * set up (macb_init_rings).
 */
static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	__macb_set_hwaddr(bp);

	/* Preserve only the MDC clock divider chosen at probe time */
	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	config |= MACB_BIT(BIG);		/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	macb_writel(bp, NCFGR, config);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

	/* Enable interrupts */
	macb_writel(bp, IER, (MACB_BIT(RCOMP)
			      | MACB_BIT(RXUBR)
			      | MACB_BIT(ISR_TUND)
			      | MACB_BIT(ISR_RLE)
			      | MACB_BIT(TXERR)
			      | MACB_BIT(TCOMP)
			      | MACB_BIT(ISR_ROVR)
			      | MACB_BIT(HRESP)));

}
 | 829 |  | 
| Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 830 | /* | 
 | 831 |  * The hash address register is 64 bits long and takes up two | 
 | 832 |  * locations in the memory map.  The least significant bits are stored | 
 | 833 |  * in EMAC_HSL and the most significant bits in EMAC_HSH. | 
 | 834 |  * | 
 | 835 |  * The unicast hash enable and the multicast hash enable bits in the | 
 | 836 |  * network configuration register enable the reception of hash matched | 
 | 837 |  * frames. The destination address is reduced to a 6 bit index into | 
 | 838 |  * the 64 bit hash register using the following hash function.  The | 
 | 839 |  * hash function is an exclusive or of every sixth bit of the | 
 | 840 |  * destination address. | 
 | 841 |  * | 
 | 842 |  * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] | 
 | 843 |  * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] | 
 | 844 |  * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] | 
 | 845 |  * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] | 
 | 846 |  * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] | 
 | 847 |  * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] | 
 | 848 |  * | 
 | 849 |  * da[0] represents the least significant bit of the first byte | 
 | 850 |  * received, that is, the multicast/unicast indicator, and da[47] | 
 | 851 |  * represents the most significant bit of the last byte received.  If | 
 | 852 |  * the hash index, hi[n], points to a bit that is set in the hash | 
 | 853 |  * register then the frame will be matched according to whether the | 
 | 854 |  * frame is multicast or unicast.  A multicast match will be signalled | 
 | 855 |  * if the multicast hash enable bit is set, da[0] is 1 and the hash | 
 | 856 |  * index points to a bit set in the hash register.  A unicast match | 
 | 857 |  * will be signalled if the unicast hash enable bit is set, da[0] is 0 | 
 | 858 |  * and the hash index points to a bit set in the hash register.  To | 
 | 859 |  * receive all multicast frames, the hash register should be set with | 
 | 860 |  * all ones and the multicast hash enable bit should be set in the | 
 | 861 |  * network configuration register. | 
 | 862 |  */ | 
 | 863 |  | 
 | 864 | static inline int hash_bit_value(int bitnr, __u8 *addr) | 
 | 865 | { | 
 | 866 | 	if (addr[bitnr / 8] & (1 << (bitnr % 8))) | 
 | 867 | 		return 1; | 
 | 868 | 	return 0; | 
 | 869 | } | 
 | 870 |  | 
 | 871 | /* | 
 | 872 |  * Return the hash index value for the specified address. | 
 | 873 |  */ | 
 | 874 | static int hash_get_index(__u8 *addr) | 
 | 875 | { | 
 | 876 | 	int i, j, bitval; | 
 | 877 | 	int hash_index = 0; | 
 | 878 |  | 
 | 879 | 	for (j = 0; j < 6; j++) { | 
 | 880 | 		for (i = 0, bitval = 0; i < 8; i++) | 
 | 881 | 			bitval ^= hash_bit_value(i*6 + j, addr); | 
 | 882 |  | 
 | 883 | 		hash_index |= (bitval << j); | 
 | 884 | 	} | 
 | 885 |  | 
 | 886 | 	return hash_index; | 
 | 887 | } | 
 | 888 |  | 
 | 889 | /* | 
 | 890 |  * Add multicast addresses to the internal multicast-hash table. | 
 | 891 |  */ | 
 | 892 | static void macb_sethashtable(struct net_device *dev) | 
 | 893 | { | 
| Jiri Pirko | 22bedad | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 894 | 	struct netdev_hw_addr *ha; | 
| Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 895 | 	unsigned long mc_filter[2]; | 
| Jiri Pirko | f9dcbcc | 2010-02-23 09:19:49 +0000 | [diff] [blame] | 896 | 	unsigned int bitnr; | 
| Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 897 | 	struct macb *bp = netdev_priv(dev); | 
 | 898 |  | 
 | 899 | 	mc_filter[0] = mc_filter[1] = 0; | 
 | 900 |  | 
| Jiri Pirko | 22bedad | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 901 | 	netdev_for_each_mc_addr(ha, dev) { | 
 | 902 | 		bitnr = hash_get_index(ha->addr); | 
| Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 903 | 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); | 
 | 904 | 	} | 
 | 905 |  | 
 | 906 | 	macb_writel(bp, HRB, mc_filter[0]); | 
 | 907 | 	macb_writel(bp, HRT, mc_filter[1]); | 
 | 908 | } | 
 | 909 |  | 
 | 910 | /* | 
 | 911 |  * Enable/Disable promiscuous and multicast modes. | 
 | 912 |  */ | 
 | 913 | static void macb_set_rx_mode(struct net_device *dev) | 
 | 914 | { | 
 | 915 | 	unsigned long cfg; | 
 | 916 | 	struct macb *bp = netdev_priv(dev); | 
 | 917 |  | 
 | 918 | 	cfg = macb_readl(bp, NCFGR); | 
 | 919 |  | 
 | 920 | 	if (dev->flags & IFF_PROMISC) | 
 | 921 | 		/* Enable promiscuous mode */ | 
 | 922 | 		cfg |= MACB_BIT(CAF); | 
 | 923 | 	else if (dev->flags & (~IFF_PROMISC)) | 
 | 924 | 		 /* Disable promiscuous mode */ | 
 | 925 | 		cfg &= ~MACB_BIT(CAF); | 
 | 926 |  | 
 | 927 | 	if (dev->flags & IFF_ALLMULTI) { | 
 | 928 | 		/* Enable all multicast mode */ | 
 | 929 | 		macb_writel(bp, HRB, -1); | 
 | 930 | 		macb_writel(bp, HRT, -1); | 
 | 931 | 		cfg |= MACB_BIT(NCFGR_MTI); | 
| Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 932 | 	} else if (!netdev_mc_empty(dev)) { | 
| Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 933 | 		/* Enable specific multicasts */ | 
 | 934 | 		macb_sethashtable(dev); | 
 | 935 | 		cfg |= MACB_BIT(NCFGR_MTI); | 
 | 936 | 	} else if (dev->flags & (~IFF_ALLMULTI)) { | 
 | 937 | 		/* Disable all multicast mode */ | 
 | 938 | 		macb_writel(bp, HRB, 0); | 
 | 939 | 		macb_writel(bp, HRT, 0); | 
 | 940 | 		cfg &= ~MACB_BIT(NCFGR_MTI); | 
 | 941 | 	} | 
 | 942 |  | 
 | 943 | 	macb_writel(bp, NCFGR, cfg); | 
 | 944 | } | 
 | 945 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 946 | static int macb_open(struct net_device *dev) | 
 | 947 | { | 
 | 948 | 	struct macb *bp = netdev_priv(dev); | 
 | 949 | 	int err; | 
 | 950 |  | 
 | 951 | 	dev_dbg(&bp->pdev->dev, "open\n"); | 
 | 952 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 953 | 	/* if the phy is not yet register, retry later*/ | 
 | 954 | 	if (!bp->phy_dev) | 
 | 955 | 		return -EAGAIN; | 
 | 956 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 957 | 	if (!is_valid_ether_addr(dev->dev_addr)) | 
 | 958 | 		return -EADDRNOTAVAIL; | 
 | 959 |  | 
 | 960 | 	err = macb_alloc_consistent(bp); | 
 | 961 | 	if (err) { | 
 | 962 | 		printk(KERN_ERR | 
 | 963 | 		       "%s: Unable to allocate DMA memory (error %d)\n", | 
 | 964 | 		       dev->name, err); | 
 | 965 | 		return err; | 
 | 966 | 	} | 
 | 967 |  | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 968 | 	napi_enable(&bp->napi); | 
 | 969 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 970 | 	macb_init_rings(bp); | 
 | 971 | 	macb_init_hw(bp); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 972 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 973 | 	/* schedule a link state check */ | 
 | 974 | 	phy_start(bp->phy_dev); | 
 | 975 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 976 | 	netif_start_queue(dev); | 
 | 977 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 978 | 	return 0; | 
 | 979 | } | 
 | 980 |  | 
/*
 * net_device stop: quiesce the queue, NAPI and the PHY, reset the
 * controller under bp->lock, then release all DMA memory.
 */
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	/* Defensive: open fails without a PHY, but check anyway */
	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}
 | 1001 |  | 
 | 1002 | static struct net_device_stats *macb_get_stats(struct net_device *dev) | 
 | 1003 | { | 
 | 1004 | 	struct macb *bp = netdev_priv(dev); | 
 | 1005 | 	struct net_device_stats *nstat = &bp->stats; | 
 | 1006 | 	struct macb_stats *hwstat = &bp->hw_stats; | 
 | 1007 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1008 | 	/* read stats from hardware */ | 
 | 1009 | 	macb_update_stats(bp); | 
 | 1010 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1011 | 	/* Convert HW stats into netdevice stats */ | 
 | 1012 | 	nstat->rx_errors = (hwstat->rx_fcs_errors + | 
 | 1013 | 			    hwstat->rx_align_errors + | 
 | 1014 | 			    hwstat->rx_resource_errors + | 
 | 1015 | 			    hwstat->rx_overruns + | 
 | 1016 | 			    hwstat->rx_oversize_pkts + | 
 | 1017 | 			    hwstat->rx_jabbers + | 
 | 1018 | 			    hwstat->rx_undersize_pkts + | 
 | 1019 | 			    hwstat->sqe_test_errors + | 
 | 1020 | 			    hwstat->rx_length_mismatch); | 
 | 1021 | 	nstat->tx_errors = (hwstat->tx_late_cols + | 
 | 1022 | 			    hwstat->tx_excessive_cols + | 
 | 1023 | 			    hwstat->tx_underruns + | 
 | 1024 | 			    hwstat->tx_carrier_errors); | 
 | 1025 | 	nstat->collisions = (hwstat->tx_single_cols + | 
 | 1026 | 			     hwstat->tx_multiple_cols + | 
 | 1027 | 			     hwstat->tx_excessive_cols); | 
 | 1028 | 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | 
 | 1029 | 				   hwstat->rx_jabbers + | 
 | 1030 | 				   hwstat->rx_undersize_pkts + | 
 | 1031 | 				   hwstat->rx_length_mismatch); | 
| Alexander Stein | b19f7f7 | 2011-04-13 05:03:24 +0000 | [diff] [blame] | 1032 | 	nstat->rx_over_errors = hwstat->rx_resource_errors + | 
 | 1033 | 				   hwstat->rx_overruns; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1034 | 	nstat->rx_crc_errors = hwstat->rx_fcs_errors; | 
 | 1035 | 	nstat->rx_frame_errors = hwstat->rx_align_errors; | 
 | 1036 | 	nstat->rx_fifo_errors = hwstat->rx_overruns; | 
 | 1037 | 	/* XXX: What does "missed" mean? */ | 
 | 1038 | 	nstat->tx_aborted_errors = hwstat->tx_excessive_cols; | 
 | 1039 | 	nstat->tx_carrier_errors = hwstat->tx_carrier_errors; | 
 | 1040 | 	nstat->tx_fifo_errors = hwstat->tx_underruns; | 
 | 1041 | 	/* Don't know about heartbeat or window errors... */ | 
 | 1042 |  | 
 | 1043 | 	return nstat; | 
 | 1044 | } | 
 | 1045 |  | 
 | 1046 | static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
 | 1047 | { | 
 | 1048 | 	struct macb *bp = netdev_priv(dev); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1049 | 	struct phy_device *phydev = bp->phy_dev; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1050 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1051 | 	if (!phydev) | 
 | 1052 | 		return -ENODEV; | 
 | 1053 |  | 
 | 1054 | 	return phy_ethtool_gset(phydev, cmd); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1055 | } | 
 | 1056 |  | 
 | 1057 | static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
 | 1058 | { | 
 | 1059 | 	struct macb *bp = netdev_priv(dev); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1060 | 	struct phy_device *phydev = bp->phy_dev; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1061 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1062 | 	if (!phydev) | 
 | 1063 | 		return -ENODEV; | 
 | 1064 |  | 
 | 1065 | 	return phy_ethtool_sset(phydev, cmd); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1066 | } | 
 | 1067 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1068 | static void macb_get_drvinfo(struct net_device *dev, | 
 | 1069 | 			     struct ethtool_drvinfo *info) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1070 | { | 
 | 1071 | 	struct macb *bp = netdev_priv(dev); | 
 | 1072 |  | 
 | 1073 | 	strcpy(info->driver, bp->pdev->dev.driver->name); | 
 | 1074 | 	strcpy(info->version, "$Revision: 1.14 $"); | 
| Kay Sievers | db1d7bf | 2009-01-26 21:12:58 -0800 | [diff] [blame] | 1075 | 	strcpy(info->bus_info, dev_name(&bp->pdev->dev)); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1076 | } | 
 | 1077 |  | 
| Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 1078 | static const struct ethtool_ops macb_ethtool_ops = { | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1079 | 	.get_settings		= macb_get_settings, | 
 | 1080 | 	.set_settings		= macb_set_settings, | 
 | 1081 | 	.get_drvinfo		= macb_get_drvinfo, | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1082 | 	.get_link		= ethtool_op_get_link, | 
 | 1083 | }; | 
 | 1084 |  | 
 | 1085 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 
 | 1086 | { | 
 | 1087 | 	struct macb *bp = netdev_priv(dev); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1088 | 	struct phy_device *phydev = bp->phy_dev; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1089 |  | 
 | 1090 | 	if (!netif_running(dev)) | 
 | 1091 | 		return -EINVAL; | 
 | 1092 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1093 | 	if (!phydev) | 
 | 1094 | 		return -ENODEV; | 
 | 1095 |  | 
| Richard Cochran | 28b0411 | 2010-07-17 08:48:55 +0000 | [diff] [blame] | 1096 | 	return phy_mii_ioctl(phydev, rq, cmd); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1097 | } | 
 | 1098 |  | 
| Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1099 | static const struct net_device_ops macb_netdev_ops = { | 
 | 1100 | 	.ndo_open		= macb_open, | 
 | 1101 | 	.ndo_stop		= macb_close, | 
 | 1102 | 	.ndo_start_xmit		= macb_start_xmit, | 
 | 1103 | 	.ndo_set_multicast_list	= macb_set_rx_mode, | 
 | 1104 | 	.ndo_get_stats		= macb_get_stats, | 
 | 1105 | 	.ndo_do_ioctl		= macb_ioctl, | 
 | 1106 | 	.ndo_validate_addr	= eth_validate_addr, | 
 | 1107 | 	.ndo_change_mtu		= eth_change_mtu, | 
 | 1108 | 	.ndo_set_mac_address	= eth_mac_addr, | 
| Thomas Petazzoni | 6e8cf5c | 2009-05-04 11:08:41 -0700 | [diff] [blame] | 1109 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
 | 1110 | 	.ndo_poll_controller	= macb_poll_controller, | 
 | 1111 | #endif | 
| Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1112 | }; | 
 | 1113 |  | 
| Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1114 | static int __init macb_probe(struct platform_device *pdev) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1115 | { | 
 | 1116 | 	struct eth_platform_data *pdata; | 
 | 1117 | 	struct resource *regs; | 
 | 1118 | 	struct net_device *dev; | 
 | 1119 | 	struct macb *bp; | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1120 | 	struct phy_device *phydev; | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1121 | 	unsigned long pclk_hz; | 
 | 1122 | 	u32 config; | 
 | 1123 | 	int err = -ENXIO; | 
 | 1124 |  | 
 | 1125 | 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
 | 1126 | 	if (!regs) { | 
 | 1127 | 		dev_err(&pdev->dev, "no mmio resource defined\n"); | 
 | 1128 | 		goto err_out; | 
 | 1129 | 	} | 
 | 1130 |  | 
 | 1131 | 	err = -ENOMEM; | 
 | 1132 | 	dev = alloc_etherdev(sizeof(*bp)); | 
 | 1133 | 	if (!dev) { | 
 | 1134 | 		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); | 
 | 1135 | 		goto err_out; | 
 | 1136 | 	} | 
 | 1137 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1138 | 	SET_NETDEV_DEV(dev, &pdev->dev); | 
 | 1139 |  | 
 | 1140 | 	/* TODO: Actually, we have some interesting features... */ | 
 | 1141 | 	dev->features |= 0; | 
 | 1142 |  | 
 | 1143 | 	bp = netdev_priv(dev); | 
 | 1144 | 	bp->pdev = pdev; | 
 | 1145 | 	bp->dev = dev; | 
 | 1146 |  | 
 | 1147 | 	spin_lock_init(&bp->lock); | 
 | 1148 |  | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1149 | #if defined(CONFIG_ARCH_AT91) | 
 | 1150 | 	bp->pclk = clk_get(&pdev->dev, "macb_clk"); | 
 | 1151 | 	if (IS_ERR(bp->pclk)) { | 
 | 1152 | 		dev_err(&pdev->dev, "failed to get macb_clk\n"); | 
 | 1153 | 		goto err_out_free_dev; | 
 | 1154 | 	} | 
 | 1155 | 	clk_enable(bp->pclk); | 
 | 1156 | #else | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1157 | 	bp->pclk = clk_get(&pdev->dev, "pclk"); | 
 | 1158 | 	if (IS_ERR(bp->pclk)) { | 
 | 1159 | 		dev_err(&pdev->dev, "failed to get pclk\n"); | 
 | 1160 | 		goto err_out_free_dev; | 
 | 1161 | 	} | 
 | 1162 | 	bp->hclk = clk_get(&pdev->dev, "hclk"); | 
 | 1163 | 	if (IS_ERR(bp->hclk)) { | 
 | 1164 | 		dev_err(&pdev->dev, "failed to get hclk\n"); | 
 | 1165 | 		goto err_out_put_pclk; | 
 | 1166 | 	} | 
 | 1167 |  | 
 | 1168 | 	clk_enable(bp->pclk); | 
 | 1169 | 	clk_enable(bp->hclk); | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1170 | #endif | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1171 |  | 
 | 1172 | 	bp->regs = ioremap(regs->start, regs->end - regs->start + 1); | 
 | 1173 | 	if (!bp->regs) { | 
 | 1174 | 		dev_err(&pdev->dev, "failed to map registers, aborting.\n"); | 
 | 1175 | 		err = -ENOMEM; | 
 | 1176 | 		goto err_out_disable_clocks; | 
 | 1177 | 	} | 
 | 1178 |  | 
 | 1179 | 	dev->irq = platform_get_irq(pdev, 0); | 
| Javier Martinez Canillas | ab392d2 | 2011-03-28 16:27:31 +0000 | [diff] [blame] | 1180 | 	err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1181 | 	if (err) { | 
 | 1182 | 		printk(KERN_ERR | 
 | 1183 | 		       "%s: Unable to request IRQ %d (error %d)\n", | 
 | 1184 | 		       dev->name, dev->irq, err); | 
 | 1185 | 		goto err_out_iounmap; | 
 | 1186 | 	} | 
 | 1187 |  | 
| Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1188 | 	dev->netdev_ops = &macb_netdev_ops; | 
| Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1189 | 	netif_napi_add(dev, &bp->napi, macb_poll, 64); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1190 | 	dev->ethtool_ops = &macb_ethtool_ops; | 
 | 1191 |  | 
 | 1192 | 	dev->base_addr = regs->start; | 
 | 1193 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1194 | 	/* Set MII management clock divider */ | 
 | 1195 | 	pclk_hz = clk_get_rate(bp->pclk); | 
 | 1196 | 	if (pclk_hz <= 20000000) | 
 | 1197 | 		config = MACB_BF(CLK, MACB_CLK_DIV8); | 
 | 1198 | 	else if (pclk_hz <= 40000000) | 
 | 1199 | 		config = MACB_BF(CLK, MACB_CLK_DIV16); | 
 | 1200 | 	else if (pclk_hz <= 80000000) | 
 | 1201 | 		config = MACB_BF(CLK, MACB_CLK_DIV32); | 
 | 1202 | 	else | 
 | 1203 | 		config = MACB_BF(CLK, MACB_CLK_DIV64); | 
 | 1204 | 	macb_writel(bp, NCFGR, config); | 
 | 1205 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1206 | 	macb_get_hwaddr(bp); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1207 | 	pdata = pdev->dev.platform_data; | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1208 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1209 | 	if (pdata && pdata->is_rmii) | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1210 | #if defined(CONFIG_ARCH_AT91) | 
 | 1211 | 		macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) ); | 
 | 1212 | #else | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1213 | 		macb_writel(bp, USRIO, 0); | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1214 | #endif | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1215 | 	else | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1216 | #if defined(CONFIG_ARCH_AT91) | 
 | 1217 | 		macb_writel(bp, USRIO, MACB_BIT(CLKEN)); | 
 | 1218 | #else | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1219 | 		macb_writel(bp, USRIO, MACB_BIT(MII)); | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1220 | #endif | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1221 |  | 
 | 1222 | 	bp->tx_pending = DEF_TX_RING_PENDING; | 
 | 1223 |  | 
 | 1224 | 	err = register_netdev(dev); | 
 | 1225 | 	if (err) { | 
 | 1226 | 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | 
 | 1227 | 		goto err_out_free_irq; | 
 | 1228 | 	} | 
 | 1229 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1230 | 	if (macb_mii_init(bp) != 0) { | 
 | 1231 | 		goto err_out_unregister_netdev; | 
 | 1232 | 	} | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1233 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1234 | 	platform_set_drvdata(pdev, dev); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1235 |  | 
| Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 1236 | 	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n", | 
 | 1237 | 	       dev->name, dev->base_addr, dev->irq, dev->dev_addr); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1238 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1239 | 	phydev = bp->phy_dev; | 
 | 1240 | 	printk(KERN_INFO "%s: attached PHY driver [%s] " | 
| Kay Sievers | db1d7bf | 2009-01-26 21:12:58 -0800 | [diff] [blame] | 1241 | 		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, | 
 | 1242 | 		phydev->drv->name, dev_name(&phydev->dev), phydev->irq); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1243 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1244 | 	return 0; | 
 | 1245 |  | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1246 | err_out_unregister_netdev: | 
 | 1247 | 	unregister_netdev(dev); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1248 | err_out_free_irq: | 
 | 1249 | 	free_irq(dev->irq, dev); | 
 | 1250 | err_out_iounmap: | 
 | 1251 | 	iounmap(bp->regs); | 
 | 1252 | err_out_disable_clocks: | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1253 | #ifndef CONFIG_ARCH_AT91 | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1254 | 	clk_disable(bp->hclk); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1255 | 	clk_put(bp->hclk); | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1256 | #endif | 
 | 1257 | 	clk_disable(bp->pclk); | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1258 | #ifndef CONFIG_ARCH_AT91 | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1259 | err_out_put_pclk: | 
| frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1260 | #endif | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1261 | 	clk_put(bp->pclk); | 
 | 1262 | err_out_free_dev: | 
 | 1263 | 	free_netdev(dev); | 
 | 1264 | err_out: | 
 | 1265 | 	platform_set_drvdata(pdev, NULL); | 
 | 1266 | 	return err; | 
 | 1267 | } | 
 | 1268 |  | 
| Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1269 | static int __exit macb_remove(struct platform_device *pdev) | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1270 | { | 
 | 1271 | 	struct net_device *dev; | 
 | 1272 | 	struct macb *bp; | 
 | 1273 |  | 
 | 1274 | 	dev = platform_get_drvdata(pdev); | 
 | 1275 |  | 
 | 1276 | 	if (dev) { | 
 | 1277 | 		bp = netdev_priv(dev); | 
| Atsushi Nemoto | 84b7901 | 2008-04-10 23:30:07 +0900 | [diff] [blame] | 1278 | 		if (bp->phy_dev) | 
 | 1279 | 			phy_disconnect(bp->phy_dev); | 
| Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 1280 | 		mdiobus_unregister(bp->mii_bus); | 
 | 1281 | 		kfree(bp->mii_bus->irq); | 
 | 1282 | 		mdiobus_free(bp->mii_bus); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1283 | 		unregister_netdev(dev); | 
 | 1284 | 		free_irq(dev->irq, dev); | 
 | 1285 | 		iounmap(bp->regs); | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1286 | #ifndef CONFIG_ARCH_AT91 | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1287 | 		clk_disable(bp->hclk); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1288 | 		clk_put(bp->hclk); | 
| Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1289 | #endif | 
 | 1290 | 		clk_disable(bp->pclk); | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1291 | 		clk_put(bp->pclk); | 
 | 1292 | 		free_netdev(dev); | 
 | 1293 | 		platform_set_drvdata(pdev, NULL); | 
 | 1294 | 	} | 
 | 1295 |  | 
 | 1296 | 	return 0; | 
 | 1297 | } | 
 | 1298 |  | 
| Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1299 | #ifdef CONFIG_PM | 
 | 1300 | static int macb_suspend(struct platform_device *pdev, pm_message_t state) | 
 | 1301 | { | 
 | 1302 | 	struct net_device *netdev = platform_get_drvdata(pdev); | 
 | 1303 | 	struct macb *bp = netdev_priv(netdev); | 
 | 1304 |  | 
 | 1305 | 	netif_device_detach(netdev); | 
 | 1306 |  | 
 | 1307 | #ifndef CONFIG_ARCH_AT91 | 
 | 1308 | 	clk_disable(bp->hclk); | 
 | 1309 | #endif | 
 | 1310 | 	clk_disable(bp->pclk); | 
 | 1311 |  | 
 | 1312 | 	return 0; | 
 | 1313 | } | 
 | 1314 |  | 
 | 1315 | static int macb_resume(struct platform_device *pdev) | 
 | 1316 | { | 
 | 1317 | 	struct net_device *netdev = platform_get_drvdata(pdev); | 
 | 1318 | 	struct macb *bp = netdev_priv(netdev); | 
 | 1319 |  | 
 | 1320 | 	clk_enable(bp->pclk); | 
 | 1321 | #ifndef CONFIG_ARCH_AT91 | 
 | 1322 | 	clk_enable(bp->hclk); | 
 | 1323 | #endif | 
 | 1324 |  | 
 | 1325 | 	netif_device_attach(netdev); | 
 | 1326 |  | 
 | 1327 | 	return 0; | 
 | 1328 | } | 
 | 1329 | #else | 
 | 1330 | #define macb_suspend	NULL | 
 | 1331 | #define macb_resume	NULL | 
 | 1332 | #endif | 
 | 1333 |  | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1334 | static struct platform_driver macb_driver = { | 
| Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1335 | 	.remove		= __exit_p(macb_remove), | 
| Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 1336 | 	.suspend	= macb_suspend, | 
 | 1337 | 	.resume		= macb_resume, | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1338 | 	.driver		= { | 
 | 1339 | 		.name		= "macb", | 
| Kay Sievers | 72abb46 | 2008-04-18 13:50:44 -0700 | [diff] [blame] | 1340 | 		.owner	= THIS_MODULE, | 
| Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1341 | 	}, | 
 | 1342 | }; | 
 | 1343 |  | 
/*
 * Module entry point: register the driver and bind it with
 * platform_driver_probe(), which passes macb_probe separately instead
 * of via the .probe field — presumably so the probe routine can live in
 * init memory (TODO: confirm macb_probe's section annotation).
 */
static int __init macb_init(void)
{
	return platform_driver_probe(&macb_driver, macb_probe);
}
 | 1348 |  | 
/* Module exit point: unregister the platform driver (triggers macb_remove
 * for any bound devices). */
static void __exit macb_exit(void)
{
	platform_driver_unregister(&macb_driver);
}
 | 1353 |  | 
module_init(macb_init);
module_exit(macb_exit);

/* Module metadata; the "platform:macb" alias lets udev/modprobe autoload
 * this module when a matching platform device is registered. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");