/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif
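
/* FEC_ALIGNMENT is used below as an address mask: transmit buffers whose
 * DMA address has any of these low bits set are first copied into a bounce
 * buffer.  So 0xf enforces 16-byte alignment on the i.MX (ARCH_MXC) parts
 * and 0x3 enforces 4-byte alignment on the ColdFire parts.
 */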

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs
*/

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are powers of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */
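
/* Note that TX_RING_MOD_MASK must stay equal to TX_RING_SIZE - 1; the
 * skb_cur/skb_dirty indices below wrap around with a cheap bitwise AND
 * against it rather than a modulo operation.
 */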

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks.
*/
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
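
/* PKT_MAXBLR_SIZE is PKT_MAXBUF_SIZE rounded up to a multiple of 16; the
 * assumption here (not spelled out elsewhere in this file) is that the
 * value programmed as the FEC's maximum receive buffer length needs to
 * be 16-byte aligned.
 */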


/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses.
	*/
	dma_addr_t bd_dma;
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t() elements */
	spinlock_t mii_lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};

static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);


/* MII processing. We keep this as simple as possible. Requests are
 * placed on the list (if there is room). When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define		NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC.
*/
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
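
/* A sketch of the bit layout produced by the macros above, following the
 * usual FEC MII management frame format: bits 31-30 start-of-frame (01),
 * bits 29-28 opcode (10 = read, 01 = write), bits 27-23 PHY address
 * (OR'ed in later by mii_queue()), bits 22-18 register address,
 * bits 17-16 turnaround (10) and bits 15-0 the write data.
 */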

/* Transmitter timeout.
*/
#define TX_TIMEOUT (2*HZ)

/* Register definitions for the PHY.
*/

#define MII_REG_CR	 0	/* Control Register */
#define MII_REG_SR	 1	/* Status Register */
#define MII_REG_PHYIR1	 2	/* PHY Identification Register 1 */
#define MII_REG_PHYIR2	 3	/* PHY Identification Register 2 */
#define MII_REG_ANAR	 4	/* A-N Advertisement Register */
#define MII_REG_ANLPAR	 5	/* A-N Link Partner Ability Register */
#define MII_REG_ANER	 6	/* A-N Expansion Register */
#define MII_REG_ANNPTR	 7	/* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR 8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile cbd_t	*bdp;
	unsigned short	status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;
#ifndef final_version
	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
	}

	/* Save skb pointer.
	*/
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	dma_sync_single(NULL, bdp->cbd_bufaddr,
			bdp->cbd_datlen, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */

	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (status & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return 0;
}

static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	printk("%s: transmit timed out.\n", dev->name);
	dev->stats.tx_errors++;
#ifndef final_version
	{
	int	i;
	cbd_t	*bdp;

	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
	       (unsigned long)fep->dirty_tx,
	       (unsigned long)fep->cur_rx);

	bdp = fep->tx_bd_base;
	printk(" tx: %u buffers\n", TX_RING_SIZE);
	for (i = 0 ; i < TX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}

	bdp = fep->rx_bd_base;
	printk(" rx: %lu buffers\n", (unsigned long) RX_RING_SIZE);
	for (i = 0 ; i < RX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}
	}
#endif
	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint	int_events;
	irqreturn_t ret = IRQ_NONE;

	/* Get the interrupt events that caused us to be here. */
	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		/* Handle receive event in its own function. */
		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}

	} while (int_events);

	return ret;
}


static void
fec_enet_tx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile cbd_t	*bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0) break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

#ifndef final_version
		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		 */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = (cbd_t *)bdp;
	spin_unlock_irq(&fep->hw_lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct	fec_enet_private *fep = netdev_priv(dev);
	volatile cbd_t *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock_irq(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

#ifndef final_version
		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");
#endif

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame.
		 */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_sync_single(NULL, (unsigned long)__pa(data),
				pkt_len - 4, DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len-4);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_put(skb, pkt_len-4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len-4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}
	rx_processing_done:

		/* Clear the status flags for this buffer.
		 */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		 */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry.
		 */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

#if 1
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
#endif
	} /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
	fep->cur_rx = (cbd_t *)bdp;

#if 0
	/* Doing this here will allow us to process all frames in the
	 * ring before the FEC is allowed to put more there.  On a heavily
	 * loaded network, some frames may be lost.  Unfortunately, this
	 * increases the interrupt overhead since we can potentially work
	 * our way back to the interrupt return only to come right back
	 * here.
	 */
	fecp->fec_r_des_active = 0;
#endif

	spin_unlock_irq(&fep->hw_lock);
}


/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	mii_list_t	*mip;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->mii_lock);

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		goto unlock;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);

unlock:
	spin_unlock_irq(&fep->mii_lock);
}

static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int		retval;

	/* Add PHY address to register command.
	*/
	fep = netdev_priv(dev);
	spin_lock_irqsave(&fep->mii_lock, flags);

	regval |= fep->phy_addr << 23;
	retval = 0;

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			writel(regval, fep->hwp + FEC_MII_DATA);
		}
	} else {
		retval = 1;
	}

	spin_unlock_irqrestore(&fep->mii_lock, flags);
	return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	if (!c)
		return;

	for (; c->mii_data != mk_mii_end; c++)
		mii_queue(dev, c->mii_data, c->funct);
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		status |= PHY_STAT_ANC;
	*s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		status |= PHY_CONF_LOOP;
	*s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x00100)
		status |= PHY_CONF_100FDX;
	*s = status;
}
/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards */

#define MII_LXT970_MIRROR	16	/* Mirror register */
#define MII_LXT970_IER		17	/* Interrupt Enable Register */
#define MII_LXT970_ISR		18	/* Interrupt Status Register */
#define MII_LXT970_CONFIG	19	/* Configuration Register */
#define MII_LXT970_CSR		20	/* Chip Status Register */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards */

/* register definitions for the 971 */

#define MII_LXT971_PCR	16	/* Port Control Register */
#define MII_LXT971_SR2	17	/* Status Register 2 */
#define MII_LXT971_IER	18	/* Interrupt Enable Register */
#define MII_LXT971_ISR	19	/* Interrupt Status Register */
#define MII_LXT971_LCR	20	/* LED Control Register */
#define MII_LXT971_TCR	30	/* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 reports the link as down on the first
		 * read after power-up.
		 * Read here to get a valid value in ack_int. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF */

/* register definitions */

#define MII_QS6612_MCR	17	/* Mode Control Register */
#define MII_QS6612_FTR	27	/* Factory Test Register */
#define MII_QS6612_MCO	28	/* Misc. Control Register */
#define MII_QS6612_ISR	29	/* Interrupt Source Register */
#define MII_QS6612_IMR	30	/* Interrupt Mask Register */
#define MII_QS6612_PCR	31	/* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch ((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy */

/* register definitions for the 874 */

#define MII_AM79C874_MFR	16	/* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR	17	/* Interrupt/Status Register */
#define MII_AM79C874_DR		18	/* Diagnostic Register */
#define MII_AM79C874_PMLR	19	/* Power and Loopback Register */
#define MII_AM79C874_MCR	21	/* Mode Control Register */
#define MII_AM79C874_DC		23	/* Disconnect Counter */
#define MII_AM79C874_REC	24	/* Receive Error Counter */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};


/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	27
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848 */

#define MII_DP8384X_PHYSTST	16	/* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	/* Link up */
	if (mii_reg & 0x0001) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else
		fep->link = 0;
	/* Status of link */
	if (mii_reg & 0x0010)		/* Autonegotiation complete */
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0002) {		/* 10 Mbps? */
		if (mii_reg & 0x0004)	/* Full duplex? */
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	} else {			/* 100 Mbps? */
		if (mii_reg & 0x0004)	/* Full duplex? */
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_dp83848 = {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {	/* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {	/* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {	/* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {	/* shutdown */
		{ mk_mii_end, }
	},
};
1185
1186/* ------------------------------------------------------------------------- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001188static phy_info_t const * const phy_info[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 &phy_info_lxt970,
1190 &phy_info_lxt971,
1191 &phy_info_qs6612,
1192 &phy_info_am79c874,
1193 &phy_info_ks8721bl,
Greg Ungerer562d2f82005-11-07 14:09:50 +10001194 &phy_info_dp83848,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 NULL
1196};
1197
1198/* ------------------------------------------------------------------------- */
Sebastian Siewiorc1d96152008-05-01 14:04:02 +10001199#ifdef HAVE_mii_link_interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01001201mii_link_interrupt(int irq, void * dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203/*
Greg Ungerer43be6362009-02-26 22:42:51 -08001204 * This is specific to the MII interrupt setup of the M5272EVB.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 */
Greg Ungerer43be6362009-02-26 22:42:51 -08001206static void __inline__ fec_request_mii_intr(struct net_device *dev)
1207{
1208 if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
1209 printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
1210}
1211
1212static void __inline__ fec_disable_phy_intr(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213{
1214 volatile unsigned long *icrp;
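	/* Mask the MII/link-change interrupt source in ICR1 (M5272-specific value;
	 * presumably this drops its priority level to 0). */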
Greg Ungerer43be6362009-02-26 22:42:51 -08001215 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
1216 *icrp = 0x08000000;
1217}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
Greg Ungerer43be6362009-02-26 22:42:51 -08001219static void __inline__ fec_phy_ack_intr(void)
1220{
1221 volatile unsigned long *icrp;
1222 /* Acknowledge the interrupt */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
Greg Ungererf861d622007-07-30 16:29:16 +10001224 *icrp = 0x0d000000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225}
1226
Greg Ungerer43be6362009-02-26 22:42:51 -08001227#ifdef CONFIG_M5272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228static void __inline__ fec_get_mac(struct net_device *dev)
1229{
1230 struct fec_enet_private *fep = netdev_priv(dev);
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001231 unsigned char *iap, tmpaddr[ETH_ALEN];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001233 if (FEC_FLASHMAC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 /*
1235 * Get MAC address from FLASH.
1236 * If it is all 1's or 0's, use the default.
1237 */
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001238 iap = (unsigned char *)FEC_FLASHMAC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
1240 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
1241 iap = fec_mac_default;
1242 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
1243 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
1244 iap = fec_mac_default;
1245 } else {
Sascha Hauerf44d6302009-04-15 03:11:30 +00001246 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
1247 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 iap = &tmpaddr[0];
1249 }
1250
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001251 memcpy(dev->dev_addr, iap, ETH_ALEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
1253 /* Adjust MAC if using default MAC address */
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001254 if (iap == fec_mac_default)
1255 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257#endif
1258
1259/* ------------------------------------------------------------------------- */
1260
1261static void mii_display_status(struct net_device *dev)
1262{
1263 struct fec_enet_private *fep = netdev_priv(dev);
1264 volatile uint *s = &(fep->phy_status);
1265
1266 if (!fep->link && !fep->old_link) {
1267 /* Link is still down - don't print anything */
1268 return;
1269 }
1270
1271 printk("%s: status: ", dev->name);
1272
1273 if (!fep->link) {
1274 printk("link down");
1275 } else {
1276 printk("link up");
1277
1278 switch(*s & PHY_STAT_SPMASK) {
1279 case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
1280 case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
1281 case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
1282 case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
1283 default:
1284 printk(", Unknown speed/duplex");
1285 }
1286
1287 if (*s & PHY_STAT_ANC)
1288 printk(", auto-negotiation complete");
1289 }
1290
1291 if (*s & PHY_STAT_FAULT)
1292 printk(", remote fault");
1293
1294 printk(".\n");
1295}
1296
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001297static void mii_display_config(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298{
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001299 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
1300 struct net_device *dev = fep->netdev;
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001301 uint status = fep->phy_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
1303 /*
1304 ** When we get here, phy_task is already removed from
1305	** the workqueue. It is thus safe to reuse it.
1306 */
1307 fep->mii_phy_task_queued = 0;
1308 printk("%s: config: auto-negotiation ", dev->name);
1309
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001310 if (status & PHY_CONF_ANE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 printk("on");
1312 else
1313 printk("off");
1314
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001315 if (status & PHY_CONF_100FDX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 printk(", 100FDX");
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001317 if (status & PHY_CONF_100HDX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 printk(", 100HDX");
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001319 if (status & PHY_CONF_10FDX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 printk(", 10FDX");
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001321 if (status & PHY_CONF_10HDX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 printk(", 10HDX");
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001323 if (!(status & PHY_CONF_SPMASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 printk(", No speed/duplex selected?");
1325
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001326 if (status & PHY_CONF_LOOP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 printk(", loopback enabled");
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001328
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 printk(".\n");
1330
1331 fep->sequence_done = 1;
1332}
1333
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001334static void mii_relink(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335{
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001336 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
1337 struct net_device *dev = fep->netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 int duplex;
1339
1340 /*
1341 ** When we get here, phy_task is already removed from
1342	** the workqueue. It is thus safe to reuse it.
1343 */
1344 fep->mii_phy_task_queued = 0;
1345 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
1346 mii_display_status(dev);
1347 fep->old_link = fep->link;
1348
1349 if (fep->link) {
1350 duplex = 0;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001351 if (fep->phy_status
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 & (PHY_STAT_100FDX | PHY_STAT_10FDX))
1353 duplex = 1;
1354 fec_restart(dev, duplex);
Philippe De Muyterf909b1e2007-10-23 14:37:54 +10001355 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 fec_stop(dev);
1357
1358#if 0
1359 enable_irq(fep->mii_irq);
1360#endif
1361
1362}
1363
1364/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
1365static void mii_queue_relink(uint mii_reg, struct net_device *dev)
1366{
1367 struct fec_enet_private *fep = netdev_priv(dev);
1368
1369 /*
1370 ** We cannot queue phy_task twice in the workqueue. It
1371 ** would cause an endless loop in the workqueue.
1372	** Fortunately, if the last mii_relink entry has not been
1373	** executed yet, it will do the job for the current interrupt,
1374 ** which is just what we want.
1375 */
1376 if (fep->mii_phy_task_queued)
1377 return;
1378
1379 fep->mii_phy_task_queued = 1;
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001380 INIT_WORK(&fep->phy_task, mii_relink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 schedule_work(&fep->phy_task);
1382}
1383
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001384/* mii_queue_config is called in interrupt context from fec_enet_mii */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385static void mii_queue_config(uint mii_reg, struct net_device *dev)
1386{
1387 struct fec_enet_private *fep = netdev_priv(dev);
1388
1389 if (fep->mii_phy_task_queued)
1390 return;
1391
1392 fep->mii_phy_task_queued = 1;
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001393 INIT_WORK(&fep->phy_task, mii_display_config);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 schedule_work(&fep->phy_task);
1395}
1396
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001397phy_cmd_t const phy_cmd_relink[] = {
1398 { mk_mii_read(MII_REG_CR), mii_queue_relink },
1399 { mk_mii_end, }
1400 };
1401phy_cmd_t const phy_cmd_config[] = {
1402 { mk_mii_read(MII_REG_CR), mii_queue_config },
1403 { mk_mii_end, }
1404 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
1406/* Read remainder of PHY ID.
1407*/
1408static void
1409mii_discover_phy3(uint mii_reg, struct net_device *dev)
1410{
1411 struct fec_enet_private *fep;
1412 int i;
1413
1414 fep = netdev_priv(dev);
1415 fep->phy_id |= (mii_reg & 0xffff);
1416 printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
1417
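	/* The low four bits of the PHY ID hold the silicon revision,
	 * so mask them off when matching against the table. */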
1418 for(i = 0; phy_info[i]; i++) {
1419 if(phy_info[i]->id == (fep->phy_id >> 4))
1420 break;
1421 }
1422
1423 if (phy_info[i])
1424 printk(" -- %s\n", phy_info[i]->name);
1425 else
1426 printk(" -- unknown PHY!\n");
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001427
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 fep->phy = phy_info[i];
1429 fep->phy_id_done = 1;
1430}
1431
1432/* Scan all of the MII PHY addresses looking for someone to respond
1433 * with a valid ID. This usually happens quickly.
1434 */
1435static void
1436mii_discover_phy(uint mii_reg, struct net_device *dev)
1437{
1438 struct fec_enet_private *fep;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 uint phytype;
1440
1441 fep = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442
1443 if (fep->phy_addr < 32) {
1444 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001445
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 /* Got first part of ID, now get remainder.
1447 */
1448 fep->phy_id = phytype << 16;
1449 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
1450 mii_discover_phy3);
Philippe De Muyterf909b1e2007-10-23 14:37:54 +10001451 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 fep->phy_addr++;
1453 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
1454 mii_discover_phy);
1455 }
1456 } else {
1457 printk("FEC: No PHY device found.\n");
1458 /* Disable external MII interface */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001459 writel(0, fep->hwp + FEC_MII_SPEED);
1460 fep->phy_speed = 0;
Greg Ungerer43be6362009-02-26 22:42:51 -08001461#ifdef HAVE_mii_link_interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 fec_disable_phy_intr();
Sascha Haueread73182009-01-28 23:03:11 +00001463#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 }
1465}
1466
1467/* This interrupt occurs when the PHY detects a link change.
1468*/
Sebastian Siewiorc1d96152008-05-01 14:04:02 +10001469#ifdef HAVE_mii_link_interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470static irqreturn_t
David Howells7d12e782006-10-05 14:55:46 +01001471mii_link_interrupt(int irq, void * dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472{
1473 struct net_device *dev = dev_id;
1474 struct fec_enet_private *fep = netdev_priv(dev);
1475
1476 fec_phy_ack_intr();
1477
1478#if 0
1479 disable_irq(fep->mii_irq); /* disable now, enable later */
1480#endif
1481
1482 mii_do_cmd(dev, fep->phy->ack_int);
1483 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
1484
1485 return IRQ_HANDLED;
1486}
Sebastian Siewiorc1d96152008-05-01 14:04:02 +10001487#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488
1489static int
1490fec_enet_open(struct net_device *dev)
1491{
1492 struct fec_enet_private *fep = netdev_priv(dev);
1493
1494 /* I should reset the ring buffers here, but I don't yet know
1495 * a simple way to do that.
1496 */
1497 fec_set_mac_address(dev);
1498
1499 fep->sequence_done = 0;
1500 fep->link = 0;
1501
1502 if (fep->phy) {
1503 mii_do_cmd(dev, fep->phy->ack_int);
1504 mii_do_cmd(dev, fep->phy->config);
1505 mii_do_cmd(dev, phy_cmd_config); /* display configuration */
1506
Matt Waddel6b265292006-06-27 13:10:56 +10001507 /* Poll until the PHY tells us its configuration
1508 * (not link state).
1509 * Request is initiated by mii_do_cmd above, but answer
1510 * comes by interrupt.
1511 * This should take about 25 usec per register at 2.5 MHz,
1512 * and we read approximately 5 registers.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 */
1514 while(!fep->sequence_done)
1515 schedule();
1516
1517 mii_do_cmd(dev, fep->phy->startup);
1518
1519 /* Set the initial link state to true. A lot of hardware
1520 * based on this device does not implement a PHY interrupt,
1521 * so we are never notified of link change.
1522 */
1523 fep->link = 1;
1524 } else {
1525	fep->link = 1; /* let's just try it and see */
1526 /* no phy, go full duplex, it's most likely a hub chip */
1527 fec_restart(dev, 1);
1528 }
1529
1530 netif_start_queue(dev);
1531 fep->opened = 1;
1532 return 0; /* Success */
1533}
1534
1535static int
1536fec_enet_close(struct net_device *dev)
1537{
1538 struct fec_enet_private *fep = netdev_priv(dev);
1539
1540 /* Don't know what to do yet.
1541 */
1542 fep->opened = 0;
1543 netif_stop_queue(dev);
1544 fec_stop(dev);
1545
1546 return 0;
1547}
1548
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549/* Set or clear the multicast filter for this adaptor.
1550 * Skeleton taken from sunlance driver.
1551 * The CPM Ethernet implementation allows Multicast as well as individual
1552 * MAC address filtering. Some of the drivers check to make sure it is
1553 * a group multicast address, and discard those that are not. I guess I
1554 * will do the same for now, but just remove the test if you want
1555 * individual filtering as well (do the upper net layers want or support
1556 * this kind of feature?).
1557 */
1558
1559#define HASH_BITS 6 /* #bits in hash */
1560#define CRC32_POLY 0xEDB88320
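/* Each multicast address is hashed with CRC-32; the upper HASH_BITS bits
 * of the CRC select one bit in the 64-bit group hash table.
 */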
1561
1562static void set_multicast_list(struct net_device *dev)
1563{
Sascha Hauerf44d6302009-04-15 03:11:30 +00001564 struct fec_enet_private *fep = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 struct dev_mc_list *dmi;
Sascha Hauerf44d6302009-04-15 03:11:30 +00001566 unsigned int i, j, bit, data, crc, tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 unsigned char hash;
1568
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 if (dev->flags&IFF_PROMISC) {
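		/* 0x8 is the PROM (promiscuous) bit of the receive control register. */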
Sascha Hauerf44d6302009-04-15 03:11:30 +00001570 tmp = readl(fep->hwp + FEC_R_CNTRL);
1571 tmp |= 0x8;
1572 writel(tmp, fep->hwp + FEC_R_CNTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 } else {
Sascha Hauerf44d6302009-04-15 03:11:30 +00001574 tmp = readl(fep->hwp + FEC_R_CNTRL);
1575 tmp &= ~0x8;
1576 writel(tmp, fep->hwp + FEC_R_CNTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
1578 if (dev->flags & IFF_ALLMULTI) {
1579 /* Catch all multicast addresses, so set the
1580 * filter to all 1's.
1581 */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001582 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1583 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 } else {
1585	/* Clear filter and add the addresses to the hash registers.
1586 */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001587 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1588 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001589
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 dmi = dev->mc_list;
1591
1592 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
1593 {
1594 /* Only support group multicast for now.
1595 */
1596 if (!(dmi->dmi_addr[0] & 1))
1597 continue;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001598
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 /* calculate crc32 value of mac address
1600 */
1601 crc = 0xffffffff;
1602
1603 for (i = 0; i < dmi->dmi_addrlen; i++)
1604 {
1605 data = dmi->dmi_addr[i];
1606 for (bit = 0; bit < 8; bit++, data >>= 1)
1607 {
1608 crc = (crc >> 1) ^
1609 (((crc ^ data) & 1) ? CRC32_POLY : 0);
1610 }
1611 }
1612
1613	/* only the upper 6 bits (HASH_BITS) are used,
1614	which select a specific bit in the hash registers
1615 */
1616 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001617
Sascha Hauerf44d6302009-04-15 03:11:30 +00001618 if (hash > 31) {
1619 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1620 tmp |= 1 << (hash - 32);
1621 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1622 } else {
1623 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1624 tmp |= 1 << hash;
1625 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 }
1628 }
1629 }
1630}
1631
1632/* Set a MAC change in hardware.
1633 */
1634static void
1635fec_set_mac_address(struct net_device *dev)
1636{
Sascha Hauerf44d6302009-04-15 03:11:30 +00001637 struct fec_enet_private *fep = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
1639 /* Set station address. */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001640 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
1641 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
1642 fep->hwp + FEC_ADDR_LOW);
1643 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
1644	fep->hwp + FEC_ADDR_HIGH);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645}
1646
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 /*
1648 * XXX: We need to clean up on failure exits here.
Sascha Haueread73182009-01-28 23:03:11 +00001649 *
1650 * index is only used in legacy code
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 */
Sascha Haueread73182009-01-28 23:03:11 +00001652int __init fec_enet_init(struct net_device *dev, int index)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653{
1654 struct fec_enet_private *fep = netdev_priv(dev);
1655 unsigned long mem_addr;
1656 volatile cbd_t *bdp;
1657 cbd_t *cbd_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 int i, j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659
Greg Ungerer562d2f82005-11-07 14:09:50 +10001660 /* Allocate memory for buffer descriptors.
1661 */
Sascha Hauer4661e752009-01-28 23:03:07 +00001662 mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE,
1663 &fep->bd_dma, GFP_KERNEL);
Greg Ungerer562d2f82005-11-07 14:09:50 +10001664 if (mem_addr == 0) {
1665 printk("FEC: allocate descriptor memory failed?\n");
1666 return -ENOMEM;
1667 }
1668
Sebastian Siewior3b2b74c2008-05-01 14:08:12 +10001669 spin_lock_init(&fep->hw_lock);
1670 spin_lock_init(&fep->mii_lock);
1671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 fep->index = index;
Sascha Hauerf44d6302009-04-15 03:11:30 +00001673 fep->hwp = (void __iomem *)dev->base_addr;
Greg Ungerercb84d6e2007-07-30 16:29:09 +10001674 fep->netdev = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675
1676 /* Whack a reset. We should wait for this.
1677 */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001678 writel(1, fep->hwp + FEC_ECNTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 udelay(10);
1680
Sascha Haueread73182009-01-28 23:03:11 +00001681 /* Set the Ethernet address */
Greg Ungerer43be6362009-02-26 22:42:51 -08001682#ifdef CONFIG_M5272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 fec_get_mac(dev);
Sascha Haueread73182009-01-28 23:03:11 +00001684#else
1685 {
1686 unsigned long l;
Sascha Hauerf44d6302009-04-15 03:11:30 +00001687 l = readl(fep->hwp + FEC_ADDR_LOW);
Sascha Haueread73182009-01-28 23:03:11 +00001688 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
1689 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
1690 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
1691 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
Sascha Hauerf44d6302009-04-15 03:11:30 +00001692 l = readl(fep->hwp + FEC_ADDR_HIGH);
Sascha Haueread73182009-01-28 23:03:11 +00001693 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
1694 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
1695 }
1696#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 cbd_base = (cbd_t *)mem_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 /* Set receive and transmit descriptor base.
1701 */
1702 fep->rx_bd_base = cbd_base;
1703 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1704
1705 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1706 fep->cur_rx = fep->rx_bd_base;
1707
1708 fep->skb_cur = fep->skb_dirty = 0;
1709
1710 /* Initialize the receive buffer descriptors.
1711 */
1712 bdp = fep->rx_bd_base;
1713 for (i=0; i<FEC_ENET_RX_PAGES; i++) {
1714
1715 /* Allocate a page.
1716 */
1717 mem_addr = __get_free_page(GFP_KERNEL);
1718 /* XXX: missing check for allocation failure */
1719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 /* Initialize the BD for every fragment in the page.
1721 */
1722 for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
1723 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1724 bdp->cbd_bufaddr = __pa(mem_addr);
1725 mem_addr += FEC_ENET_RX_FRSIZE;
1726 bdp++;
1727 }
1728 }
1729
1730 /* Set the last buffer to wrap.
1731 */
1732 bdp--;
1733 bdp->cbd_sc |= BD_SC_WRAP;
1734
1735	/* ...and the same for transmit.
1736 */
1737 bdp = fep->tx_bd_base;
1738 for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
1739 if (j >= FEC_ENET_TX_FRPPG) {
1740 mem_addr = __get_free_page(GFP_KERNEL);
1741 j = 1;
1742 } else {
1743 mem_addr += FEC_ENET_TX_FRSIZE;
1744 j++;
1745 }
1746 fep->tx_bounce[i] = (unsigned char *) mem_addr;
1747
1748 /* Initialize the BD for every fragment in the page.
1749 */
1750 bdp->cbd_sc = 0;
1751 bdp->cbd_bufaddr = 0;
1752 bdp++;
1753 }
1754
1755 /* Set the last buffer to wrap.
1756 */
1757 bdp--;
1758 bdp->cbd_sc |= BD_SC_WRAP;
1759
1760 /* Set receive and transmit descriptor base.
1761 */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001762 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1763 writel((unsigned long)fep->bd_dma + sizeof(cbd_t) * RX_RING_SIZE,
1764 fep->hwp + FEC_X_DES_START);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
Greg Ungerer43be6362009-02-26 22:42:51 -08001766#ifdef HAVE_mii_link_interrupt
1767 fec_request_mii_intr(dev);
Sascha Haueread73182009-01-28 23:03:11 +00001768#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
Sascha Hauerf44d6302009-04-15 03:11:30 +00001770 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1771 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1772 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
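	/* Enable the controller (ETHER_EN) and signal that receive descriptors are available. */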
1773 writel(2, fep->hwp + FEC_ECNTRL);
1774 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
Greg Ungerercc462f72008-05-01 13:35:34 +10001775#ifndef CONFIG_M5272
Sascha Hauerf44d6302009-04-15 03:11:30 +00001776 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1777 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
Greg Ungerercc462f72008-05-01 13:35:34 +10001778#endif
Greg Ungerer562d2f82005-11-07 14:09:50 +10001779
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 /* The FEC Ethernet specific entries in the device structure. */
1781 dev->open = fec_enet_open;
1782 dev->hard_start_xmit = fec_enet_start_xmit;
1783 dev->tx_timeout = fec_timeout;
1784 dev->watchdog_timeo = TX_TIMEOUT;
1785 dev->stop = fec_enet_close;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 dev->set_multicast_list = set_multicast_list;
1787
1788 for (i=0; i<NMII-1; i++)
1789 mii_cmds[i].mii_next = &mii_cmds[i+1];
1790 mii_free = mii_cmds;
1791
1792 /* setup MII interface */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001793 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1794 writel(0, fep->hwp + FEC_X_CNTRL);
Sascha Haueread73182009-01-28 23:03:11 +00001795
1796 /*
1797 * Set MII speed to 2.5 MHz
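	 * (The +4999999 rounds the divider up, so the resulting management
	 * clock never exceeds 2.5 MHz; the final shift by one presumably
	 * positions the value in the speed field of FEC_MII_SPEED.)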
1798 */
1799 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
1800 / 2500000) / 2) & 0x3F) << 1;
Sascha Hauerf44d6302009-04-15 03:11:30 +00001801 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
Sascha Haueread73182009-01-28 23:03:11 +00001802 fec_restart(dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
Matt Waddel6b265292006-06-27 13:10:56 +10001804 /* Clear and enable interrupts */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001805 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1806 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
1807 fep->hwp + FEC_IMASK);
Matt Waddel6b265292006-06-27 13:10:56 +10001808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 /* Queue up command to detect the PHY and initialize the
1810 * remainder of the interface.
1811 */
1812 fep->phy_id_done = 0;
1813 fep->phy_addr = 0;
1814 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
1815
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 return 0;
1817}
1818
1819/* This function is called to start or restart the FEC during a link
1820 * change. This only happens when switching between half and full
1821 * duplex.
1822 */
1823static void
1824fec_restart(struct net_device *dev, int duplex)
1825{
Sascha Hauerf44d6302009-04-15 03:11:30 +00001826 struct fec_enet_private *fep = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 volatile cbd_t *bdp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 int i;
1829
Sascha Hauerf44d6302009-04-15 03:11:30 +00001830 /* Whack a reset. We should wait for this. */
1831 writel(1, fep->hwp + FEC_ECNTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 udelay(10);
1833
Sascha Hauerf44d6302009-04-15 03:11:30 +00001834 /* Clear any outstanding interrupt. */
1835 writel(0xffc00000, fep->hwp + FEC_IEVENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
Sascha Hauerf44d6302009-04-15 03:11:30 +00001837 /* Set station address. */
Greg Ungerer7dd6a2a2005-09-12 11:18:10 +10001838 fec_set_mac_address(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839
Sascha Hauerf44d6302009-04-15 03:11:30 +00001840 /* Reset all multicast. */
1841 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1842 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843
Sascha Hauerf44d6302009-04-15 03:11:30 +00001844 /* Set maximum receive buffer size. */
1845 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846
Sascha Hauerf44d6302009-04-15 03:11:30 +00001847 /* Set receive and transmit descriptor base. */
1848 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1849 writel((unsigned long)fep->bd_dma + sizeof(cbd_t) * RX_RING_SIZE,
1850 fep->hwp + FEC_X_DES_START);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
1852 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1853 fep->cur_rx = fep->rx_bd_base;
1854
Sascha Hauerf44d6302009-04-15 03:11:30 +00001855 /* Reset SKB transmit buffers. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 fep->skb_cur = fep->skb_dirty = 0;
1857 for (i=0; i<=TX_RING_MOD_MASK; i++) {
1858 if (fep->tx_skbuff[i] != NULL) {
1859 dev_kfree_skb_any(fep->tx_skbuff[i]);
1860 fep->tx_skbuff[i] = NULL;
1861 }
1862 }
1863
Sascha Hauerf44d6302009-04-15 03:11:30 +00001864 /* Initialize the receive buffer descriptors. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 bdp = fep->rx_bd_base;
1866 for (i=0; i<RX_RING_SIZE; i++) {
1867
Sascha Hauerf44d6302009-04-15 03:11:30 +00001868 /* Initialize the BD for every fragment in the page. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1870 bdp++;
1871 }
1872
Sascha Hauerf44d6302009-04-15 03:11:30 +00001873 /* Set the last buffer to wrap. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 bdp--;
1875 bdp->cbd_sc |= BD_SC_WRAP;
1876
Sascha Hauerf44d6302009-04-15 03:11:30 +00001877	/* ...and the same for transmit. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 bdp = fep->tx_bd_base;
1879 for (i=0; i<TX_RING_SIZE; i++) {
1880
Sascha Hauerf44d6302009-04-15 03:11:30 +00001881 /* Initialize the BD for every fragment in the page. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 bdp->cbd_sc = 0;
1883 bdp->cbd_bufaddr = 0;
1884 bdp++;
1885 }
1886
Sascha Hauerf44d6302009-04-15 03:11:30 +00001887 /* Set the last buffer to wrap. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 bdp--;
1889 bdp->cbd_sc |= BD_SC_WRAP;
1890
Sascha Hauerf44d6302009-04-15 03:11:30 +00001891 /* Enable MII mode. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 if (duplex) {
Sascha Hauerf44d6302009-04-15 03:11:30 +00001893 /* MII enable / FD enable */
1894 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1895 writel(0x04, fep->hwp + FEC_X_CNTRL);
Philippe De Muyterf909b1e2007-10-23 14:37:54 +10001896 } else {
Sascha Hauerf44d6302009-04-15 03:11:30 +00001897 /* MII enable / No Rcv on Xmit */
1898 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1899 writel(0x0, fep->hwp + FEC_X_CNTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 }
1901 fep->full_duplex = duplex;
1902
Sascha Hauerf44d6302009-04-15 03:11:30 +00001903 /* Set MII speed. */
1904 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
Sascha Hauerf44d6302009-04-15 03:11:30 +00001906 /* And last, enable the transmit and receive processing. */
1907 writel(2, fep->hwp + FEC_ECNTRL);
1908 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
Matt Waddel6b265292006-06-27 13:10:56 +10001909
Sascha Hauerf44d6302009-04-15 03:11:30 +00001910 /* Enable interrupts we wish to service. */
1911 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
1912 fep->hwp + FEC_IMASK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913}
1914
1915static void
1916fec_stop(struct net_device *dev)
1917{
Sascha Hauerf44d6302009-04-15 03:11:30 +00001918 struct fec_enet_private *fep = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
Philippe De Muyter677177c2006-06-27 13:05:33 +10001920 /*
1921 ** We cannot expect a graceful transmit stop without link !!!
1922 */
Sascha Hauerf44d6302009-04-15 03:11:30 +00001923 if (fep->link) {
1924 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
Philippe De Muyter677177c2006-06-27 13:05:33 +10001925 udelay(10);
Sascha Hauerf44d6302009-04-15 03:11:30 +00001926 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
Philippe De Muyter677177c2006-06-27 13:05:33 +10001927 printk("fec_stop : Graceful transmit stop did not complete !\n");
Sascha Hauerf44d6302009-04-15 03:11:30 +00001928 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
Sascha Hauerf44d6302009-04-15 03:11:30 +00001930 /* Whack a reset. We should wait for this. */
1931 writel(1, fep->hwp + FEC_ECNTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 udelay(10);
1933
Sascha Hauerf44d6302009-04-15 03:11:30 +00001934 /* Clear outstanding MII command interrupts. */
1935 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Sascha Hauerf44d6302009-04-15 03:11:30 +00001937 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1938 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939}
1940
Sascha Haueread73182009-01-28 23:03:11 +00001941static int __devinit
1942fec_probe(struct platform_device *pdev)
1943{
1944 struct fec_enet_private *fep;
1945 struct net_device *ndev;
1946 int i, irq, ret = 0;
1947 struct resource *r;
1948
1949 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1950 if (!r)
1951 return -ENXIO;
1952
1953 r = request_mem_region(r->start, resource_size(r), pdev->name);
1954 if (!r)
1955 return -EBUSY;
1956
1957 /* Init network device */
1958 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1959 if (!ndev)
1960 return -ENOMEM;
1961
1962 SET_NETDEV_DEV(ndev, &pdev->dev);
1963
1964 /* setup board info structure */
1965 fep = netdev_priv(ndev);
1966 memset(fep, 0, sizeof(*fep));
1967
1968 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
1969
1970 if (!ndev->base_addr) {
1971 ret = -ENOMEM;
1972 goto failed_ioremap;
1973 }
1974
1975 platform_set_drvdata(pdev, ndev);
1976
1977 /* This device has up to three irqs on some platforms */
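	/* The first interrupt is mandatory; the remaining ones are optional
	 * and may be absent on a given platform. */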
1978 for (i = 0; i < 3; i++) {
1979 irq = platform_get_irq(pdev, i);
1980 if (i && irq < 0)
1981 break;
1982 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1983 if (ret) {
1984 while (i >= 0) {
1985 irq = platform_get_irq(pdev, i);
1986 free_irq(irq, ndev);
1987 i--;
1988 }
1989 goto failed_irq;
1990 }
1991 }
1992
1993 fep->clk = clk_get(&pdev->dev, "fec_clk");
1994 if (IS_ERR(fep->clk)) {
1995 ret = PTR_ERR(fep->clk);
1996 goto failed_clk;
1997 }
1998 clk_enable(fep->clk);
1999
2000 ret = fec_enet_init(ndev, 0);
2001 if (ret)
2002 goto failed_init;
2003
2004 ret = register_netdev(ndev);
2005 if (ret)
2006 goto failed_register;
2007
2008 return 0;
2009
2010failed_register:
2011failed_init:
2012 clk_disable(fep->clk);
2013 clk_put(fep->clk);
2014failed_clk:
2015 for (i = 0; i < 3; i++) {
2016 irq = platform_get_irq(pdev, i);
2017 if (irq > 0)
2018 free_irq(irq, ndev);
2019 }
2020failed_irq:
2021 iounmap((void __iomem *)ndev->base_addr);
2022failed_ioremap:
2023 free_netdev(ndev);
2024
2025 return ret;
2026}
2027
2028static int __devexit
2029fec_drv_remove(struct platform_device *pdev)
2030{
2031 struct net_device *ndev = platform_get_drvdata(pdev);
2032 struct fec_enet_private *fep = netdev_priv(ndev);
2033
2034 platform_set_drvdata(pdev, NULL);
2035
2036 fec_stop(ndev);
2037 clk_disable(fep->clk);
2038 clk_put(fep->clk);
2039 iounmap((void __iomem *)ndev->base_addr);
2040 unregister_netdev(ndev);
2041 free_netdev(ndev);
2042 return 0;
2043}
2044
2045static int
2046fec_suspend(struct platform_device *dev, pm_message_t state)
2047{
2048 struct net_device *ndev = platform_get_drvdata(dev);
2049 struct fec_enet_private *fep;
2050
2051 if (ndev) {
2052 fep = netdev_priv(ndev);
2053 if (netif_running(ndev)) {
2054 netif_device_detach(ndev);
2055 fec_stop(ndev);
2056 }
2057 }
2058 return 0;
2059}
2060
2061static int
2062fec_resume(struct platform_device *dev)
2063{
2064 struct net_device *ndev = platform_get_drvdata(dev);
2065
2066 if (ndev) {
2067 if (netif_running(ndev)) {
2068 fec_enet_init(ndev, 0);
2069 netif_device_attach(ndev);
2070 }
2071 }
2072 return 0;
2073}
2074
2075static struct platform_driver fec_driver = {
2076 .driver = {
2077 .name = "fec",
2078 .owner = THIS_MODULE,
2079 },
2080 .probe = fec_probe,
2081 .remove = __devexit_p(fec_drv_remove),
2082 .suspend = fec_suspend,
2083 .resume = fec_resume,
2084};
2085
2086static int __init
2087fec_enet_module_init(void)
2088{
2089 printk(KERN_INFO "FEC Ethernet Driver\n");
2090
2091 return platform_driver_register(&fec_driver);
2092}
2093
2094static void __exit
2095fec_enet_cleanup(void)
2096{
2097 platform_driver_unregister(&fec_driver);
2098}
2099
2100module_exit(fec_enet_cleanup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101module_init(fec_enet_module_init);
2102
2103MODULE_LICENSE("GPL");