/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
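
/*
 * rx packets smaller than copybreak bytes are copied into a freshly
 * allocated skb by bcm_enet_receive_queue(), so that the full-size
 * rx buffer can stay dma-mapped and be handed straight back to the
 * ring instead of being reallocated
 */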

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base + off);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}
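
/*
 * Illustrative use (not part of the original driver): a debug path
 * could poll the link status bit through the helper above with
 *
 *	int bmsr = bcm_enet_mdio_read(priv, priv->phy_id, MII_BMSR);
 *
 * where MII_BMSR is the standard register offset from <linux/mii.h>
 * and a negative return value means the mdio transfer timed out.
 */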

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	/* a timed out write is not reported to callers */
	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;

			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= DMADESC_WRAP_MASK;
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
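		/* make the new buffer address visible to the dma engine
		 * before ownership is handed over via len_stat */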
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}
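
/*
 * rx descriptor ownership handshake: the refill path above hands a
 * descriptor to the hardware by setting DMADESC_OWNER_MASK; the dma
 * engine clears that bit once it has written a frame, which is what
 * bcm_enet_receive_queue() tests before touching the descriptor.
 */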

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than the number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status on
		 * each loop iteration */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flags set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure the other fields of the descriptor were not
		 * read before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			priv->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
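
/*
 * note on the force parameter above: bcm_enet_poll() reclaims with
 * force=0 and stops at the first descriptor still owned by hardware,
 * while bcm_enet_stop() reclaims with force=1 to release every
 * pending skb once dma has been shut down.
 */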

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
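
/*
 * interrupt/napi handshake: the dma isr above masks the rx and tx
 * PKTDONE interrupts and schedules napi; bcm_enet_poll() acks the
 * pending status, does the actual work and unmasks both interrupts
 * again only once rx and tx are fully processed.
 */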

/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full; this should never
	 * happen since we stop the queue before it fills up */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might already be polling, make sure we update desc
	 * fields in the correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	priv->stats.tx_bytes += skb->len;
	priv->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
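
/*
 * note: each transmitted skb occupies exactly one descriptor; the
 * whole linear buffer is mapped and both start- and end-of-packet
 * bits (DMADESC_ESOP_MASK) are set, so the tx path does no
 * scatter-gather.
 */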

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}
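
/*
 * worked example for the layout above: with mac address
 * 00:11:22:33:44:55, ENET_PML_REG(0) gets 0x22334455 (bytes 2-5) and
 * ENET_PMH_REG(0) gets 0x0011 | ENET_PMH_DATAVALID_MASK (bytes 0-1
 * plus the valid bit).
 */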

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * our own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
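
/*
 * the two directions are independent: rx_en controls whether the mac
 * honours incoming pause frames (ENET_RXCFG_ENFLOW_MASK), while
 * tx_en lets the dma engine generate pause frames itself when the rx
 * ring fills past the thresholds programmed in bcm_enet_open() (1/3
 * and 2/3 of the ring size).
 */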

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if the remote end advertises it (trust
	 * phylib to check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by the link partner and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
			       phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));

	limit = 1000;
	do {
		u32 val;

		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
		if (!(val & ENETDMA_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}

/*
 * core request to return device rx/tx stats
 */
static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return &priv->stats;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
		     offsetof(struct bcm_enet_priv, m)
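
/*
 * e.g. GEN_STAT(stats.rx_packets) expands to the two initializers
 * "sizeof(((struct bcm_enet_priv *)0)->stats.rx_packets),
 * offsetof(struct bcm_enet_priv, stats.rx_packets)", which fill the
 * sizeof_stat and stat_offset fields of struct bcm_enet_stats.
 */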

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", GEN_STAT(stats.rx_packets), -1 },
	{ "tx_packets",	GEN_STAT(stats.tx_packets), -1 },
	{ "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
	{ "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
	{ "rx_errors", GEN_STAT(stats.rx_errors), -1 },
	{ "tx_errors", GEN_STAT(stats.tx_errors), -1 },
	{ "rx_dropped",	GEN_STAT(stats.rx_dropped), -1 },
	{ "tx_dropped",	GEN_STAT(stats.tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "bcm63xx", 32);
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		p = (char *)priv + s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	cmd->maxrxpkt = 0;
	cmd->maxtxpkt = 0;

	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_gset(priv->phydev, cmd);
	} else {
		cmd->autoneg = 0;
		cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
		cmd->duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd->supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		cmd->advertising = 0;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
	}
	return 0;
}

static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	} else {
		if (cmd->autoneg ||
		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
		    cmd->port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}
 | 1438 |  | 
 | 1439 | static void bcm_enet_get_pauseparam(struct net_device *dev, | 
 | 1440 | 				    struct ethtool_pauseparam *ecmd) | 
 | 1441 | { | 
 | 1442 | 	struct bcm_enet_priv *priv; | 
 | 1443 |  | 
 | 1444 | 	priv = netdev_priv(dev); | 
 | 1445 | 	ecmd->autoneg = priv->pause_auto; | 
 | 1446 | 	ecmd->rx_pause = priv->pause_rx; | 
 | 1447 | 	ecmd->tx_pause = priv->pause_tx; | 
 | 1448 | } | 
 | 1449 |  | 
 | 1450 | static int bcm_enet_set_pauseparam(struct net_device *dev, | 
 | 1451 | 				   struct ethtool_pauseparam *ecmd) | 
 | 1452 | { | 
 | 1453 | 	struct bcm_enet_priv *priv; | 
 | 1454 |  | 
 | 1455 | 	priv = netdev_priv(dev); | 
 | 1456 |  | 
 | 1457 | 	if (priv->has_phy) { | 
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode is not supported; it is
			 * actually possible, but the integrated PHY's
			 * asym_pause bit is read-only */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.get_settings		= bcm_enet_get_settings,
	.set_settings		= bcm_enet_set_settings,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
};

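/*
 * MII ioctls are forwarded to phylib when a PHY is attached;
 * otherwise a minimal mii_if_info is built on the stack so the
 * generic MII helpers can drive the raw mdio accessors
 */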
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * calculate actual hardware mtu
 */
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	int actual_mtu;

	actual_mtu = mtu;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

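	/* reject frame sizes the hardware cannot handle */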
	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * set up the maximum size before the overflow mark is hit in
	 * the descriptor; note that this does not prevent reception
	 * of big frames, they will just be split into multiple
	 * buffers anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to the dma burst length, and account
	 * for the FCS since it is appended by hardware
	 */
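	/*
	 * e.g., assuming BCMENET_DMA_MAXBURST is 16 (so a 64 byte
	 * alignment), a 1500 byte mtu gives
	 * ALIGN(1500 + 18 + 4, 64) = 1536 bytes per rx skb
	 */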
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  BCMENET_DMA_MAXBURST * 4);
	return 0;
}

/*
 * adjust mtu, cannot be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (ret)
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

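	/* poll for the reset bit to self-clear, waiting up to ~1ms */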
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_get_stats		= bcm_enet_get_stats,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_multicast_list	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_enet_netpoll,
#endif
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int __devinit bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	const char *clk_name;
	unsigned int iomem_size;
	int i, ret;

	/* stop if the shared driver failed; assume driver->probe is
	 * called in the same order we registered the devices */
	if (!bcm_enet_shared_base)
		return -ENODEV;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	ret = compute_hw_mtu(priv, dev->mtu);
	if (ret)
		goto out;

	iomem_size = resource_size(res_mem);
	if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
		ret = -EBUSY;
		goto out;
	}

	priv->base = ioremap(res_mem->start, iomem_size);
	if (priv->base == NULL) {
		ret = -ENOMEM;
		goto out_release_mem;
	}
	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;
	priv->mac_id = pdev->id;

	/* get rx & tx dma channel id for this mac */
	if (priv->mac_id == 0) {
		priv->rx_chan = 0;
		priv->tx_chan = 1;
		clk_name = "enet0";
	} else {
		priv->rx_chan = 2;
		priv->tx_chan = 3;
		clk_name = "enet1";
	}

	priv->mac_clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	clk_enable(priv->mac_clk);

	/* initialize defaults, then fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
	}

	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_put_clk_mac;
		}
		clk_enable(priv->phy_clk);
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		snprintf(bus->id, sizeof(bus->id), "%d", priv->mac_id);

		/* only probe the bus address where we expect the PHY,
		 * because the mdio read operation returns 0 instead of
		 * 0xffff when a slave is not present on the hardware */
		bus->phy_mask = ~(1 << priv->phy_id);

		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
		if (!bus->irq) {
			ret = -ENOMEM;
			goto out_free_mdio;
		}

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
		else
			bus->irq[priv->phy_id] = PHY_POLL;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* init the mib update lock & work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
	}

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}

out_put_clk_mac:
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, iomem_size);
out:
	free_netdev(dev);
	return ret;
}


/*
 * exit func, stops hardware and unregisters netdevice
 */
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = pdev->dev.platform_data;
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= __devexit_p(bcm_enet_remove),
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};

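/*
 * a board wires this driver up by registering matching platform
 * devices; a minimal sketch, assuming the bcm63xx_enet_register()
 * helper declared in bcm63xx_dev_enet.h:
 *
 *	static struct bcm63xx_enet_platform_data enet0_pd = {
 *		.has_phy	  = 1,
 *		.use_internal_phy = 1,
 *	};
 *
 *	bcm63xx_enet_register(0, &enet0_pd);
 */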
/*
 * reserve & remap memory space shared between all macs
 */
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int iomem_size;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem_size = resource_size(res);
	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
		return -EBUSY;

	bcm_enet_shared_base = ioremap(res->start, iomem_size);
	if (!bcm_enet_shared_base) {
		release_mem_region(res->start, iomem_size);
		return -ENOMEM;
	}
	return 0;
}

static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
	struct resource *res;

	iounmap(bcm_enet_shared_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	return 0;
}

/*
 * this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= __devexit_p(bcm_enet_shared_remove),
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner	= THIS_MODULE,
	},
};

/*
 * entry point
 */
static int __init bcm_enet_init(void)
{
	int ret;

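	/* the shared driver must be registered first so its probe runs
	 * before the per-mac probes, which check bcm_enet_shared_base */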
	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&bcm63xx_enet_driver);
	if (ret)
		platform_driver_unregister(&bcm63xx_enet_shared_driver);

	return ret;
}

static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}


module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");