/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
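/* received packets shorter than copybreak bytes are copied into a
 * freshly allocated skb so that the full-size rx buffer can be
 * rearmed in place; see bcm_enet_receive_queue() */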

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				       u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
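
/* purely illustrative: reading PHY register MII_BMSR (1) on PHY
 * address 0 would build the frame
 *   (1 << ENET_MIIDATA_REG_SHIFT) | (0x2 << ENET_MIIDATA_TA_SHIFT) |
 *   (0 << ENET_MIIDATA_PHYID_SHIFT) | ENET_MIIDATA_OP_READ_MASK
 * and hand it to do_mdio_op(), which is exactly what
 * bcm_enet_mdio_read() below does for arbitrary ids and registers */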

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

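	/* a timed-out transfer is not reported to the caller: MII
	 * writes are treated as fire-and-forget here */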
	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;

			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
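		/* make sure the buffer address is visible to the dma
		 * engine before ownership of the descriptor is handed
		 * back to hardware */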
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan the ring further than the number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

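		/* small packet: copy it into a fresh skb and keep the
		 * original full-size buffer mapped and armed in the
		 * ring, trading a memcpy for a dma unmap/map cycle */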
		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queues, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

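	/* rx/tx interrupts stay masked until bcm_enet_poll() has
	 * drained the rings; it re-enables them after napi_complete() */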
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full; this should not happen
	 * since we stop the queue before that's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

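	/* advance the ring index; the last descriptor carries the wrap
	 * bit so the dma engine returns to the ring base */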
	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if the remote advertises it (trust phylib
	 * to have checked that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
			       phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));
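	/* (the FORCE write appears to reset the channel's buffer
	 * allocation counter to zero, before bcm_enet_refill_rx()
	 * credits it once per armed descriptor) */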

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

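	/* busy wait for the hardware to clear the disable bit, with a
	 * ~1ms timeout */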
	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

	limit = 1000;
	do {
		u32 val;

		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
		if (!(val & ENETDMA_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
		     offsetof(struct net_device_stats, m)
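
/* each entry below pairs a counter's size with its offset inside
 * either bcm_enet_priv (hardware MIB counters, mib_reg >= 0) or
 * net_device_stats (software counters, flagged with mib_reg == -1) */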

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};
#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
					int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also read the unused mib counters: reading clears them, which
	 * makes sure the mib counter overflow interrupt stays cleared */
 | 1288 | 	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) | 
 | 1289 | 		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); | 
 | 1290 | } | 
 | 1291 |  | 
 | 1292 | static void bcm_enet_update_mib_counters_defer(struct work_struct *t) | 
 | 1293 | { | 
 | 1294 | 	struct bcm_enet_priv *priv; | 
 | 1295 |  | 
 | 1296 | 	priv = container_of(t, struct bcm_enet_priv, mib_update_task); | 
 | 1297 | 	mutex_lock(&priv->mib_update_lock); | 
 | 1298 | 	update_mib_counters(priv); | 
 | 1299 | 	mutex_unlock(&priv->mib_update_lock); | 
 | 1300 |  | 
 | 1301 | 	/* reenable mib interrupt */ | 
 | 1302 | 	if (netif_running(priv->net_dev)) | 
 | 1303 | 		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); | 
 | 1304 | } | 
 | 1305 |  | 
 | 1306 | static void bcm_enet_get_ethtool_stats(struct net_device *netdev, | 
 | 1307 | 				       struct ethtool_stats *stats, | 
 | 1308 | 				       u64 *data) | 
 | 1309 | { | 
 | 1310 | 	struct bcm_enet_priv *priv; | 
 | 1311 | 	int i; | 
 | 1312 |  | 
 | 1313 | 	priv = netdev_priv(netdev); | 
 | 1314 |  | 
 | 1315 | 	mutex_lock(&priv->mib_update_lock); | 
 | 1316 | 	update_mib_counters(priv); | 
 | 1317 |  | 
 | 1318 | 	for (i = 0; i < BCM_ENET_STATS_LEN; i++) { | 
 | 1319 | 		const struct bcm_enet_stats *s; | 
 | 1320 | 		char *p; | 
 | 1321 |  | 
 | 1322 | 		s = &bcm_enet_gstrings_stats[i]; | 
| Eric Dumazet | c32d83c | 2010-08-24 12:24:07 -0700 | [diff] [blame] | 1323 | 		if (s->mib_reg == -1) | 
 | 1324 | 			p = (char *)&netdev->stats; | 
 | 1325 | 		else | 
 | 1326 | 			p = (char *)priv; | 
 | 1327 | 		p += s->stat_offset; | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1328 | 		data[i] = (s->sizeof_stat == sizeof(u64)) ? | 
 | 1329 | 			*(u64 *)p : *(u32 *)p; | 
 | 1330 | 	} | 
 | 1331 | 	mutex_unlock(&priv->mib_update_lock); | 
 | 1332 | } | 
 | 1333 |  | 
 | 1334 | static int bcm_enet_get_settings(struct net_device *dev, | 
 | 1335 | 				 struct ethtool_cmd *cmd) | 
 | 1336 | { | 
 | 1337 | 	struct bcm_enet_priv *priv; | 
 | 1338 |  | 
 | 1339 | 	priv = netdev_priv(dev); | 
 | 1340 |  | 
 | 1341 | 	cmd->maxrxpkt = 0; | 
 | 1342 | 	cmd->maxtxpkt = 0; | 
 | 1343 |  | 
 | 1344 | 	if (priv->has_phy) { | 
 | 1345 | 		if (!priv->phydev) | 
 | 1346 | 			return -ENODEV; | 
 | 1347 | 		return phy_ethtool_gset(priv->phydev, cmd); | 
 | 1348 | 	} else { | 
 | 1349 | 		cmd->autoneg = 0; | 
| David Decotigny | 7073949 | 2011-04-27 18:32:40 +0000 | [diff] [blame] | 1350 | 		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100) | 
 | 1351 | 					    ? SPEED_100 : SPEED_10)); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1352 | 		cmd->duplex = (priv->force_duplex_full) ? | 
 | 1353 | 			DUPLEX_FULL : DUPLEX_HALF; | 
 | 1354 | 		cmd->supported = ADVERTISED_10baseT_Half  | | 
 | 1355 | 			ADVERTISED_10baseT_Full | | 
 | 1356 | 			ADVERTISED_100baseT_Half | | 
 | 1357 | 			ADVERTISED_100baseT_Full; | 
 | 1358 | 		cmd->advertising = 0; | 
 | 1359 | 		cmd->port = PORT_MII; | 
 | 1360 | 		cmd->transceiver = XCVR_EXTERNAL; | 
 | 1361 | 	} | 
 | 1362 | 	return 0; | 
 | 1363 | } | 
 | 1364 |  | 
 | 1365 | static int bcm_enet_set_settings(struct net_device *dev, | 
 | 1366 | 				 struct ethtool_cmd *cmd) | 
 | 1367 | { | 
 | 1368 | 	struct bcm_enet_priv *priv; | 
 | 1369 |  | 
 | 1370 | 	priv = netdev_priv(dev); | 
 | 1371 | 	if (priv->has_phy) { | 
 | 1372 | 		if (!priv->phydev) | 
 | 1373 | 			return -ENODEV; | 
 | 1374 | 		return phy_ethtool_sset(priv->phydev, cmd); | 
 | 1375 | 	} else { | 
 | 1376 |  | 
 | 1377 | 		if (cmd->autoneg || | 
 | 1378 | 		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || | 
 | 1379 | 		    cmd->port != PORT_MII) | 
 | 1380 | 			return -EINVAL; | 
 | 1381 |  | 
 | 1382 | 		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; | 
 | 1383 | 		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0; | 
 | 1384 |  | 
 | 1385 | 		if (netif_running(dev)) | 
 | 1386 | 			bcm_enet_adjust_link(dev); | 
 | 1387 | 		return 0; | 
 | 1388 | 	} | 
 | 1389 | } | 
 | 1390 |  | 
 | 1391 | static void bcm_enet_get_ringparam(struct net_device *dev, | 
 | 1392 | 				   struct ethtool_ringparam *ering) | 
 | 1393 | { | 
 | 1394 | 	struct bcm_enet_priv *priv; | 
 | 1395 |  | 
 | 1396 | 	priv = netdev_priv(dev); | 
 | 1397 |  | 
 | 1398 | 	/* rx/tx ring is actually only limited by memory */ | 
 | 1399 | 	ering->rx_max_pending = 8192; | 
 | 1400 | 	ering->tx_max_pending = 8192; | 
 | 1401 | 	ering->rx_mini_max_pending = 0; | 
 | 1402 | 	ering->rx_jumbo_max_pending = 0; | 
 | 1403 | 	ering->rx_pending = priv->rx_ring_size; | 
 | 1404 | 	ering->tx_pending = priv->tx_ring_size; | 
 | 1405 | } | 
 | 1406 |  | 
 | 1407 | static int bcm_enet_set_ringparam(struct net_device *dev, | 
 | 1408 | 				  struct ethtool_ringparam *ering) | 
 | 1409 | { | 
 | 1410 | 	struct bcm_enet_priv *priv; | 
 | 1411 | 	int was_running; | 
 | 1412 |  | 
 | 1413 | 	priv = netdev_priv(dev); | 
 | 1414 |  | 
 | 1415 | 	was_running = 0; | 
 | 1416 | 	if (netif_running(dev)) { | 
 | 1417 | 		bcm_enet_stop(dev); | 
 | 1418 | 		was_running = 1; | 
 | 1419 | 	} | 
 | 1420 |  | 
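 |  | 	/* the new sizes take effect when the rings are reallocated | 
 |  | 	 * by the next bcm_enet_open() */ | 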
 | 1421 | 	priv->rx_ring_size = ering->rx_pending; | 
 | 1422 | 	priv->tx_ring_size = ering->tx_pending; | 
 | 1423 |  | 
 | 1424 | 	if (was_running) { | 
 | 1425 | 		int err; | 
 | 1426 |  | 
 | 1427 | 		err = bcm_enet_open(dev); | 
 | 1428 | 		if (err) | 
 | 1429 | 			dev_close(dev); | 
 | 1430 | 		else | 
 | 1431 | 			bcm_enet_set_multicast_list(dev); | 
 | 1432 | 	} | 
 | 1433 | 	return 0; | 
 | 1434 | } | 
 | 1435 |  | 
 | 1436 | static void bcm_enet_get_pauseparam(struct net_device *dev, | 
 | 1437 | 				    struct ethtool_pauseparam *ecmd) | 
 | 1438 | { | 
 | 1439 | 	struct bcm_enet_priv *priv; | 
 | 1440 |  | 
 | 1441 | 	priv = netdev_priv(dev); | 
 | 1442 | 	ecmd->autoneg = priv->pause_auto; | 
 | 1443 | 	ecmd->rx_pause = priv->pause_rx; | 
 | 1444 | 	ecmd->tx_pause = priv->pause_tx; | 
 | 1445 | } | 
 | 1446 |  | 
 | 1447 | static int bcm_enet_set_pauseparam(struct net_device *dev, | 
 | 1448 | 				   struct ethtool_pauseparam *ecmd) | 
 | 1449 | { | 
 | 1450 | 	struct bcm_enet_priv *priv; | 
 | 1451 |  | 
 | 1452 | 	priv = netdev_priv(dev); | 
 | 1453 |  | 
 | 1454 | 	if (priv->has_phy) { | 
 | 1455 | 		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { | 
 | 1456 | 			/* asymmetric pause mode not supported; | 
 | 1457 | 			 * actually possible, but the integrated PHY | 
 | 1458 | 			 * has a read-only asym_pause bit */ | 
 | 1459 | 			return -EINVAL; | 
 | 1460 | 		} | 
 | 1461 | 	} else { | 
 | 1462 | 		/* no pause autoneg on direct mii connection */ | 
 | 1463 | 		if (ecmd->autoneg) | 
 | 1464 | 			return -EINVAL; | 
 | 1465 | 	} | 
 | 1466 |  | 
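 |  | 	/* only record the requested configuration here; it is | 
 |  | 	 * applied to the mac the next time the link is set up */ | 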
 | 1467 | 	priv->pause_auto = ecmd->autoneg; | 
 | 1468 | 	priv->pause_rx = ecmd->rx_pause; | 
 | 1469 | 	priv->pause_tx = ecmd->tx_pause; | 
 | 1470 |  | 
 | 1471 | 	return 0; | 
 | 1472 | } | 
 | 1473 |  | 
 | 1474 | static const struct ethtool_ops bcm_enet_ethtool_ops = { | 
 | 1475 | 	.get_strings		= bcm_enet_get_strings, | 
| Florian Fainelli | a3f92ee | 2009-12-15 06:45:06 +0000 | [diff] [blame] | 1476 | 	.get_sset_count		= bcm_enet_get_sset_count, | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1477 | 	.get_ethtool_stats      = bcm_enet_get_ethtool_stats, | 
 | 1478 | 	.get_settings		= bcm_enet_get_settings, | 
 | 1479 | 	.set_settings		= bcm_enet_set_settings, | 
 | 1480 | 	.get_drvinfo		= bcm_enet_get_drvinfo, | 
 | 1481 | 	.get_link		= ethtool_op_get_link, | 
 | 1482 | 	.get_ringparam		= bcm_enet_get_ringparam, | 
 | 1483 | 	.set_ringparam		= bcm_enet_set_ringparam, | 
 | 1484 | 	.get_pauseparam		= bcm_enet_get_pauseparam, | 
 | 1485 | 	.set_pauseparam		= bcm_enet_set_pauseparam, | 
 | 1486 | }; | 
 | 1487 |  | 
 | 1488 | static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 
 | 1489 | { | 
 | 1490 | 	struct bcm_enet_priv *priv; | 
 | 1491 |  | 
 | 1492 | 	priv = netdev_priv(dev); | 
 | 1493 | 	if (priv->has_phy) { | 
 | 1494 | 		if (!priv->phydev) | 
 | 1495 | 			return -ENODEV; | 
| Richard Cochran | 28b0411 | 2010-07-17 08:48:55 +0000 | [diff] [blame] | 1496 | 		return phy_mii_ioctl(priv->phydev, rq, cmd); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1497 | 	} else { | 
 | 1498 | 		struct mii_if_info mii; | 
 | 1499 |  | 
 | 1500 | 		mii.dev = dev; | 
 | 1501 | 		mii.mdio_read = bcm_enet_mdio_read_mii; | 
 | 1502 | 		mii.mdio_write = bcm_enet_mdio_write_mii; | 
 | 1503 | 		mii.phy_id = 0; | 
 | 1504 | 		mii.phy_id_mask = 0x3f; | 
 | 1505 | 		mii.reg_num_mask = 0x1f; | 
 | 1506 | 		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); | 
 | 1507 | 	} | 
 | 1508 | } | 
 | 1509 |  | 
 | 1510 | /* | 
 | 1511 |  * calculate actual hardware mtu | 
 | 1512 |  */ | 
 | 1513 | static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) | 
 | 1514 | { | 
 | 1515 | 	int actual_mtu; | 
 | 1516 |  | 
 | 1517 | 	actual_mtu = mtu; | 
 | 1518 |  | 
 | 1519 | 	/* add ethernet header + vlan tag size */ | 
 | 1520 | 	actual_mtu += VLAN_ETH_HLEN; | 
 | 1521 |  | 
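 |  | 	/* reject sizes the mac cannot handle: below the 64 byte | 
 |  | 	 * ethernet minimum or above the hardware maximum */ | 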
 | 1522 | 	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) | 
 | 1523 | 		return -EINVAL; | 
 | 1524 |  | 
 | 1525 | 	/* | 
 | 1526 | 	 * set up the maximum size before we get the overflow mark | 
 | 1527 | 	 * in the descriptor; note that this does not prevent | 
 | 1528 | 	 * reception of big frames, they will simply be split into | 
 | 1529 | 	 * multiple buffers | 
 | 1530 | 	 */ | 
 | 1531 | 	priv->hw_mtu = actual_mtu; | 
 | 1532 |  | 
 | 1533 | 	/* | 
 | 1534 | 	 * align the rx buffer size to the dma burst length, and | 
 | 1535 | 	 * account for the FCS since it is appended | 
 | 1536 | 	 */ | 
 | 1537 | 	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, | 
 | 1538 | 				  BCMENET_DMA_MAXBURST * 4); | 
 | 1539 | 	return 0; | 
 | 1540 | } | 
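 |  |  | 
 |  | /* | 
 |  |  * worked example (illustrative; assumes BCMENET_DMA_MAXBURST is 16): | 
 |  |  * the standard 1500 byte mtu gives actual_mtu = 1500 + | 
 |  |  * VLAN_ETH_HLEN (18) = 1518, and rx_skb_size = ALIGN(1518 + | 
 |  |  * ETH_FCS_LEN (4), 64) = 1536 bytes per rx buffer. | 
 |  |  */ | 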
 | 1541 |  | 
 | 1542 | /* | 
 | 1543 |  * adjust mtu, can't be called while device is running | 
 | 1544 |  */ | 
 | 1545 | static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) | 
 | 1546 | { | 
 | 1547 | 	int ret; | 
 | 1548 |  | 
 | 1549 | 	if (netif_running(dev)) | 
 | 1550 | 		return -EBUSY; | 
 | 1551 |  | 
 | 1552 | 	ret = compute_hw_mtu(netdev_priv(dev), new_mtu); | 
 | 1553 | 	if (ret) | 
 | 1554 | 		return ret; | 
 | 1555 | 	dev->mtu = new_mtu; | 
 | 1556 | 	return 0; | 
 | 1557 | } | 
 | 1558 |  | 
 | 1559 | /* | 
 | 1560 |  * preinit hardware to allow mii operation while device is down | 
 | 1561 |  */ | 
 | 1562 | static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) | 
 | 1563 | { | 
 | 1564 | 	u32 val; | 
 | 1565 | 	int limit; | 
 | 1566 |  | 
 | 1567 | 	/* make sure mac is disabled */ | 
 | 1568 | 	bcm_enet_disable_mac(priv); | 
 | 1569 |  | 
 | 1570 | 	/* soft reset mac */ | 
 | 1571 | 	val = ENET_CTL_SRESET_MASK; | 
 | 1572 | 	enet_writel(priv, val, ENET_CTL_REG); | 
 | 1573 | 	wmb(); | 
 | 1574 |  | 
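 |  | 	/* wait up to ~1ms for the reset bit to self-clear */ | 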
 | 1575 | 	limit = 1000; | 
 | 1576 | 	do { | 
 | 1577 | 		val = enet_readl(priv, ENET_CTL_REG); | 
 | 1578 | 		if (!(val & ENET_CTL_SRESET_MASK)) | 
 | 1579 | 			break; | 
 | 1580 | 		udelay(1); | 
 | 1581 | 	} while (limit--); | 
 | 1582 |  | 
 | 1583 | 	/* select correct mii interface */ | 
 | 1584 | 	val = enet_readl(priv, ENET_CTL_REG); | 
 | 1585 | 	if (priv->use_external_mii) | 
 | 1586 | 		val |= ENET_CTL_EPHYSEL_MASK; | 
 | 1587 | 	else | 
 | 1588 | 		val &= ~ENET_CTL_EPHYSEL_MASK; | 
 | 1589 | 	enet_writel(priv, val, ENET_CTL_REG); | 
 | 1590 |  | 
 | 1591 | 	/* turn on mdc clock */ | 
 | 1592 | 	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | | 
 | 1593 | 		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); | 
 | 1594 |  | 
 | 1595 | 	/* set mib counters to self-clear when read */ | 
 | 1596 | 	val = enet_readl(priv, ENET_MIBCTL_REG); | 
 | 1597 | 	val |= ENET_MIBCTL_RDCLEAR_MASK; | 
 | 1598 | 	enet_writel(priv, val, ENET_MIBCTL_REG); | 
 | 1599 | } | 
 | 1600 |  | 
 | 1601 | static const struct net_device_ops bcm_enet_ops = { | 
 | 1602 | 	.ndo_open		= bcm_enet_open, | 
 | 1603 | 	.ndo_stop		= bcm_enet_stop, | 
 | 1604 | 	.ndo_start_xmit		= bcm_enet_start_xmit, | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1605 | 	.ndo_set_mac_address	= bcm_enet_set_mac_address, | 
 | 1606 | 	.ndo_set_multicast_list = bcm_enet_set_multicast_list, | 
 | 1607 | 	.ndo_do_ioctl		= bcm_enet_ioctl, | 
 | 1608 | 	.ndo_change_mtu		= bcm_enet_change_mtu, | 
 | 1609 | #ifdef CONFIG_NET_POLL_CONTROLLER | 
 | 1610 | 	.ndo_poll_controller = bcm_enet_netpoll, | 
 | 1611 | #endif | 
 | 1612 | }; | 
 | 1613 |  | 
 | 1614 | /* | 
 | 1615 |  * allocate netdevice, request register memory and register device. | 
 | 1616 |  */ | 
 | 1617 | static int __devinit bcm_enet_probe(struct platform_device *pdev) | 
 | 1618 | { | 
 | 1619 | 	struct bcm_enet_priv *priv; | 
 | 1620 | 	struct net_device *dev; | 
 | 1621 | 	struct bcm63xx_enet_platform_data *pd; | 
 | 1622 | 	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; | 
 | 1623 | 	struct mii_bus *bus; | 
 | 1624 | 	const char *clk_name; | 
 | 1625 | 	unsigned int iomem_size; | 
 | 1626 | 	int i, ret; | 
 | 1627 |  | 
 | 1628 | 	/* stop if the shared driver failed; we assume driver->probe | 
 | 1629 | 	 * is called in the same order we register devices (correct?) */ | 
 | 1630 | 	if (!bcm_enet_shared_base) | 
 | 1631 | 		return -ENODEV; | 
 | 1632 |  | 
 | 1633 | 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
 | 1634 | 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 
 | 1635 | 	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | 
 | 1636 | 	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); | 
 | 1637 | 	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) | 
 | 1638 | 		return -ENODEV; | 
 | 1639 |  | 
 | 1640 | 	ret = 0; | 
 | 1641 | 	dev = alloc_etherdev(sizeof(*priv)); | 
 | 1642 | 	if (!dev) | 
 | 1643 | 		return -ENOMEM; | 
 | 1644 | 	priv = netdev_priv(dev); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1645 |  | 
 | 1646 | 	ret = compute_hw_mtu(priv, dev->mtu); | 
 | 1647 | 	if (ret) | 
 | 1648 | 		goto out; | 
 | 1649 |  | 
| Joe Perches | 28f65c11 | 2011-06-09 09:13:32 -0700 | [diff] [blame] | 1650 | 	iomem_size = resource_size(res_mem); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1651 | 	if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { | 
 | 1652 | 		ret = -EBUSY; | 
 | 1653 | 		goto out; | 
 | 1654 | 	} | 
 | 1655 |  | 
 | 1656 | 	priv->base = ioremap(res_mem->start, iomem_size); | 
 | 1657 | 	if (priv->base == NULL) { | 
 | 1658 | 		ret = -ENOMEM; | 
 | 1659 | 		goto out_release_mem; | 
 | 1660 | 	} | 
 | 1661 | 	dev->irq = priv->irq = res_irq->start; | 
 | 1662 | 	priv->irq_rx = res_irq_rx->start; | 
 | 1663 | 	priv->irq_tx = res_irq_tx->start; | 
 | 1664 | 	priv->mac_id = pdev->id; | 
 | 1665 |  | 
 | 1666 | 	/* get rx & tx dma channel id for this mac */ | 
 | 1667 | 	if (priv->mac_id == 0) { | 
 | 1668 | 		priv->rx_chan = 0; | 
 | 1669 | 		priv->tx_chan = 1; | 
 | 1670 | 		clk_name = "enet0"; | 
 | 1671 | 	} else { | 
 | 1672 | 		priv->rx_chan = 2; | 
 | 1673 | 		priv->tx_chan = 3; | 
 | 1674 | 		clk_name = "enet1"; | 
 | 1675 | 	} | 
 | 1676 |  | 
 | 1677 | 	priv->mac_clk = clk_get(&pdev->dev, clk_name); | 
 | 1678 | 	if (IS_ERR(priv->mac_clk)) { | 
 | 1679 | 		ret = PTR_ERR(priv->mac_clk); | 
 | 1680 | 		goto out_unmap; | 
 | 1681 | 	} | 
 | 1682 | 	clk_enable(priv->mac_clk); | 
 | 1683 |  | 
 | 1684 | 	/* initialize defaults and fetch platform data */ | 
 | 1685 | 	priv->rx_ring_size = BCMENET_DEF_RX_DESC; | 
 | 1686 | 	priv->tx_ring_size = BCMENET_DEF_TX_DESC; | 
 | 1687 |  | 
 | 1688 | 	pd = pdev->dev.platform_data; | 
 | 1689 | 	if (pd) { | 
 | 1690 | 		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); | 
 | 1691 | 		priv->has_phy = pd->has_phy; | 
 | 1692 | 		priv->phy_id = pd->phy_id; | 
 | 1693 | 		priv->has_phy_interrupt = pd->has_phy_interrupt; | 
 | 1694 | 		priv->phy_interrupt = pd->phy_interrupt; | 
 | 1695 | 		priv->use_external_mii = !pd->use_internal_phy; | 
 | 1696 | 		priv->pause_auto = pd->pause_auto; | 
 | 1697 | 		priv->pause_rx = pd->pause_rx; | 
 | 1698 | 		priv->pause_tx = pd->pause_tx; | 
 | 1699 | 		priv->force_duplex_full = pd->force_duplex_full; | 
 | 1700 | 		priv->force_speed_100 = pd->force_speed_100; | 
 | 1701 | 	} | 
 | 1702 |  | 
 | 1703 | 	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { | 
 | 1704 | 		/* using internal PHY, enable clock */ | 
 | 1705 | 		priv->phy_clk = clk_get(&pdev->dev, "ephy"); | 
 | 1706 | 		if (IS_ERR(priv->phy_clk)) { | 
 | 1707 | 			ret = PTR_ERR(priv->phy_clk); | 
 | 1708 | 			priv->phy_clk = NULL; | 
 | 1709 | 			goto out_put_clk_mac; | 
 | 1710 | 		} | 
 | 1711 | 		clk_enable(priv->phy_clk); | 
 | 1712 | 	} | 
 | 1713 |  | 
 | 1714 | 	/* do minimal hardware init to be able to probe mii bus */ | 
 | 1715 | 	bcm_enet_hw_preinit(priv); | 
 | 1716 |  | 
 | 1717 | 	/* MII bus registration */ | 
 | 1718 | 	if (priv->has_phy) { | 
 | 1719 |  | 
 | 1720 | 		priv->mii_bus = mdiobus_alloc(); | 
 | 1721 | 		if (!priv->mii_bus) { | 
 | 1722 | 			ret = -ENOMEM; | 
 | 1723 | 			goto out_uninit_hw; | 
 | 1724 | 		} | 
 | 1725 |  | 
 | 1726 | 		bus = priv->mii_bus; | 
 | 1727 | 		bus->name = "bcm63xx_enet MII bus"; | 
 | 1728 | 		bus->parent = &pdev->dev; | 
 | 1729 | 		bus->priv = priv; | 
 | 1730 | 		bus->read = bcm_enet_mdio_read_phylib; | 
 | 1731 | 		bus->write = bcm_enet_mdio_write_phylib; | 
 | 1732 | 		snprintf(bus->id, sizeof(bus->id), "%d", priv->mac_id); | 
 | 1733 |  | 
 | 1734 | 		/* only probe the bus address where we think the PHY | 
 | 1735 | 		 * is, because the mdio read operation returns 0 | 
 | 1736 | 		 * instead of 0xffff when a slave is not present on hw */ | 
 | 1737 | 		bus->phy_mask = ~(1 << priv->phy_id); | 
 | 1738 |  | 
 | 1739 | 		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | 
 | 1740 | 		if (!bus->irq) { | 
 | 1741 | 			ret = -ENOMEM; | 
 | 1742 | 			goto out_free_mdio; | 
 | 1743 | 		} | 
 | 1744 |  | 
 | 1745 | 		if (priv->has_phy_interrupt) | 
 | 1746 | 			bus->irq[priv->phy_id] = priv->phy_interrupt; | 
 | 1747 | 		else | 
 | 1748 | 			bus->irq[priv->phy_id] = PHY_POLL; | 
 | 1749 |  | 
 | 1750 | 		ret = mdiobus_register(bus); | 
 | 1751 | 		if (ret) { | 
 | 1752 | 			dev_err(&pdev->dev, "unable to register mdio bus\n"); | 
 | 1753 | 			goto out_free_mdio; | 
 | 1754 | 		} | 
 | 1755 | 	} else { | 
 | 1756 |  | 
 | 1757 | 		/* run platform code to initialize PHY device */ | 
 | 1758 | 		if (pd && pd->mii_config && | 
 | 1759 | 		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, | 
 | 1760 | 				   bcm_enet_mdio_write_mii)) { | 
 | 1761 | 			dev_err(&pdev->dev, "unable to configure mdio bus\n"); | 
 |  | 			ret = -ENODEV; | 
 | 1762 | 			goto out_uninit_hw; | 
 | 1763 | 		} | 
 | 1764 | 	} | 
 | 1765 |  | 
 | 1766 | 	spin_lock_init(&priv->rx_lock); | 
 | 1767 |  | 
 | 1768 | 	/* init rx timeout (used for oom) */ | 
 | 1769 | 	init_timer(&priv->rx_timeout); | 
 | 1770 | 	priv->rx_timeout.function = bcm_enet_refill_rx_timer; | 
 | 1771 | 	priv->rx_timeout.data = (unsigned long)dev; | 
 | 1772 |  | 
 | 1773 | 	/* init the mib update lock & work */ | 
 | 1774 | 	mutex_init(&priv->mib_update_lock); | 
 | 1775 | 	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); | 
 | 1776 |  | 
 | 1777 | 	/* zero mib counters */ | 
 | 1778 | 	for (i = 0; i < ENET_MIB_REG_COUNT; i++) | 
 | 1779 | 		enet_writel(priv, 0, ENET_MIB_REG(i)); | 
 | 1780 |  | 
 | 1781 | 	/* register netdevice */ | 
 | 1782 | 	dev->netdev_ops = &bcm_enet_ops; | 
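 |  | 	/* attach the NAPI poll handler with a budget weight of 16 */ | 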
 | 1783 | 	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); | 
 | 1784 |  | 
 | 1785 | 	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); | 
 | 1786 | 	SET_NETDEV_DEV(dev, &pdev->dev); | 
 | 1787 |  | 
 | 1788 | 	ret = register_netdev(dev); | 
 | 1789 | 	if (ret) | 
 | 1790 | 		goto out_unregister_mdio; | 
 | 1791 |  | 
 | 1792 | 	netif_carrier_off(dev); | 
 | 1793 | 	platform_set_drvdata(pdev, dev); | 
 | 1794 | 	priv->pdev = pdev; | 
 | 1795 | 	priv->net_dev = dev; | 
 | 1796 |  | 
 | 1797 | 	return 0; | 
 | 1798 |  | 
 | 1799 | out_unregister_mdio: | 
 | 1800 | 	if (priv->mii_bus) | 
 | 1801 | 		mdiobus_unregister(priv->mii_bus); | 
 | 1804 |  | 
 | 1805 | out_free_mdio: | 
 | 1806 | 	if (priv->mii_bus) { | 
 |  | 		/* also reached when mdiobus_register() fails; free the | 
 |  | 		 * irq table here so it is not leaked on that path */ | 
 |  | 		kfree(priv->mii_bus->irq); | 
 | 1807 | 		mdiobus_free(priv->mii_bus); | 
 |  | 	} | 
 | 1808 |  | 
 | 1809 | out_uninit_hw: | 
 | 1810 | 	/* turn off mdc clock */ | 
 | 1811 | 	enet_writel(priv, 0, ENET_MIISC_REG); | 
 | 1812 | 	if (priv->phy_clk) { | 
 | 1813 | 		clk_disable(priv->phy_clk); | 
 | 1814 | 		clk_put(priv->phy_clk); | 
 | 1815 | 	} | 
 | 1816 |  | 
 | 1817 | out_put_clk_mac: | 
 | 1818 | 	clk_disable(priv->mac_clk); | 
 | 1819 | 	clk_put(priv->mac_clk); | 
 | 1820 |  | 
 | 1821 | out_unmap: | 
 | 1822 | 	iounmap(priv->base); | 
 | 1823 |  | 
 | 1824 | out_release_mem: | 
 | 1825 | 	release_mem_region(res_mem->start, iomem_size); | 
 | 1826 | out: | 
 | 1827 | 	free_netdev(dev); | 
 | 1828 | 	return ret; | 
 | 1829 | } | 
 | 1830 |  | 
 | 1831 |  | 
 | 1832 | /* | 
 | 1833 |  * exit func, stops hardware and unregisters netdevice | 
 | 1834 |  */ | 
 | 1835 | static int __devexit bcm_enet_remove(struct platform_device *pdev) | 
 | 1836 | { | 
 | 1837 | 	struct bcm_enet_priv *priv; | 
 | 1838 | 	struct net_device *dev; | 
 | 1839 | 	struct resource *res; | 
 | 1840 |  | 
 | 1841 | 	/* stop netdevice */ | 
 | 1842 | 	dev = platform_get_drvdata(pdev); | 
 | 1843 | 	priv = netdev_priv(dev); | 
 | 1844 | 	unregister_netdev(dev); | 
 | 1845 |  | 
 | 1846 | 	/* turn off mdc clock */ | 
 | 1847 | 	enet_writel(priv, 0, ENET_MIISC_REG); | 
 | 1848 |  | 
 | 1849 | 	if (priv->has_phy) { | 
 | 1850 | 		mdiobus_unregister(priv->mii_bus); | 
 | 1851 | 		kfree(priv->mii_bus->irq); | 
 | 1852 | 		mdiobus_free(priv->mii_bus); | 
 | 1853 | 	} else { | 
 | 1854 | 		struct bcm63xx_enet_platform_data *pd; | 
 | 1855 |  | 
 | 1856 | 		pd = pdev->dev.platform_data; | 
 | 1857 | 		if (pd && pd->mii_config) | 
 | 1858 | 			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, | 
 | 1859 | 				       bcm_enet_mdio_write_mii); | 
 | 1860 | 	} | 
 | 1861 |  | 
 | 1862 | 	/* release device resources */ | 
 | 1863 | 	iounmap(priv->base); | 
 | 1864 | 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
| Joe Perches | 28f65c11 | 2011-06-09 09:13:32 -0700 | [diff] [blame] | 1865 | 	release_mem_region(res->start, resource_size(res)); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1866 |  | 
 | 1867 | 	/* disable hw block clocks */ | 
 | 1868 | 	if (priv->phy_clk) { | 
 | 1869 | 		clk_disable(priv->phy_clk); | 
 | 1870 | 		clk_put(priv->phy_clk); | 
 | 1871 | 	} | 
 | 1872 | 	clk_disable(priv->mac_clk); | 
 | 1873 | 	clk_put(priv->mac_clk); | 
 | 1874 |  | 
 | 1875 | 	platform_set_drvdata(pdev, NULL); | 
 | 1876 | 	free_netdev(dev); | 
 | 1877 | 	return 0; | 
 | 1878 | } | 
 | 1879 |  | 
 | 1880 | static struct platform_driver bcm63xx_enet_driver = { | 
 | 1881 | 	.probe	= bcm_enet_probe, | 
 | 1882 | 	.remove	= __devexit_p(bcm_enet_remove), | 
 | 1883 | 	.driver	= { | 
 | 1884 | 		.name	= "bcm63xx_enet", | 
 | 1885 | 		.owner  = THIS_MODULE, | 
 | 1886 | 	}, | 
 | 1887 | }; | 
 | 1888 |  | 
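 |  | /* | 
 |  |  * for reference, a board file is expected to provide the platform | 
 |  |  * device together with a struct bcm63xx_enet_platform_data | 
 |  |  * describing the PHY setup; an illustrative sketch only (the field | 
 |  |  * values and the bcm63xx_enet_register() helper are assumptions, | 
 |  |  * not part of this file): | 
 |  |  * | 
 |  |  *	static struct bcm63xx_enet_platform_data enet0_pd = { | 
 |  |  *		.has_phy		= 1, | 
 |  |  *		.use_internal_phy	= 1, | 
 |  |  *		.phy_id			= 1, | 
 |  |  *	}; | 
 |  |  * | 
 |  |  *	bcm63xx_enet_register(0, &enet0_pd); | 
 |  |  */ | 
 |  |  | 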
 | 1889 | /* | 
 | 1890 |  * reserve & remap memory space shared between all macs | 
 | 1891 |  */ | 
 | 1892 | static int __devinit bcm_enet_shared_probe(struct platform_device *pdev) | 
 | 1893 | { | 
 | 1894 | 	struct resource *res; | 
 | 1895 | 	unsigned int iomem_size; | 
 | 1896 |  | 
 | 1897 | 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
 | 1898 | 	if (!res) | 
 | 1899 | 		return -ENODEV; | 
 | 1900 |  | 
| Joe Perches | 28f65c11 | 2011-06-09 09:13:32 -0700 | [diff] [blame] | 1901 | 	iomem_size = resource_size(res); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1902 | 	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) | 
 | 1903 | 		return -EBUSY; | 
 | 1904 |  | 
 | 1905 | 	bcm_enet_shared_base = ioremap(res->start, iomem_size); | 
 | 1906 | 	if (!bcm_enet_shared_base) { | 
 | 1907 | 		release_mem_region(res->start, iomem_size); | 
 | 1908 | 		return -ENOMEM; | 
 | 1909 | 	} | 
 | 1910 | 	return 0; | 
 | 1911 | } | 
 | 1912 |  | 
 | 1913 | static int __devexit bcm_enet_shared_remove(struct platform_device *pdev) | 
 | 1914 | { | 
 | 1915 | 	struct resource *res; | 
 | 1916 |  | 
 | 1917 | 	iounmap(bcm_enet_shared_base); | 
 | 1918 | 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
| Joe Perches | 28f65c11 | 2011-06-09 09:13:32 -0700 | [diff] [blame] | 1919 | 	release_mem_region(res->start, resource_size(res)); | 
| Maxime Bizon | 9b1fc55 | 2009-08-18 13:23:40 +0100 | [diff] [blame] | 1920 | 	return 0; | 
 | 1921 | } | 
 | 1922 |  | 
 | 1923 | /* | 
 | 1924 |  * this "shared" driver is needed because both macs share a single | 
 | 1925 |  * address space | 
 | 1926 |  */ | 
 | 1927 | static struct platform_driver bcm63xx_enet_shared_driver = { | 
 | 1928 | 	.probe	= bcm_enet_shared_probe, | 
 | 1929 | 	.remove	= __devexit_p(bcm_enet_shared_remove), | 
 | 1930 | 	.driver	= { | 
 | 1931 | 		.name	= "bcm63xx_enet_shared", | 
 | 1932 | 		.owner  = THIS_MODULE, | 
 | 1933 | 	}, | 
 | 1934 | }; | 
 | 1935 |  | 
 | 1936 | /* | 
 | 1937 |  * entry point | 
 | 1938 |  */ | 
 | 1939 | static int __init bcm_enet_init(void) | 
 | 1940 | { | 
 | 1941 | 	int ret; | 
 | 1942 |  | 
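 |  | 	/* register the shared (dma) driver first so its probe runs | 
 |  | 	 * before the per-mac probes (see bcm_enet_probe) */ | 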
 | 1943 | 	ret = platform_driver_register(&bcm63xx_enet_shared_driver); | 
 | 1944 | 	if (ret) | 
 | 1945 | 		return ret; | 
 | 1946 |  | 
 | 1947 | 	ret = platform_driver_register(&bcm63xx_enet_driver); | 
 | 1948 | 	if (ret) | 
 | 1949 | 		platform_driver_unregister(&bcm63xx_enet_shared_driver); | 
 | 1950 |  | 
 | 1951 | 	return ret; | 
 | 1952 | } | 
 | 1953 |  | 
 | 1954 | static void __exit bcm_enet_exit(void) | 
 | 1955 | { | 
 | 1956 | 	platform_driver_unregister(&bcm63xx_enet_driver); | 
 | 1957 | 	platform_driver_unregister(&bcm63xx_enet_shared_driver); | 
 | 1958 | } | 
 | 1959 |  | 
 | 1960 |  | 
 | 1961 | module_init(bcm_enet_init); | 
 | 1962 | module_exit(bcm_enet_exit); | 
 | 1963 |  | 
 | 1964 | MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); | 
 | 1965 | MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); | 
 | 1966 | MODULE_LICENSE("GPL"); |