blob: 71ddcffb1c8e4a3101474b9c034d08bc22263951 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
#include <linux/io.h>

#include <linux/platform_device.h>

#include <linux/types.h>        /* size_t */
#include <linux/interrupt.h>    /* mark_bh */

#include <linux/netdevice.h>   /* struct device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/slab.h>        /* kcalloc, kfree */

#include <linux/proc_fs.h>
#include <linux/timer.h>
#include <linux/mii.h>

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/inet.h>

#include "qfec.h"
34
35#define QFEC_NAME "qfec"
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -070036#define QFEC_DRV_VER "July 14 2011"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
38#define ETH_BUF_SIZE 0x600
39#define MAX_N_BD 50
40#define MAC_ADDR_SIZE 6
41
42#define RX_TX_BD_RATIO 8
43#define RX_BD_NUM 32
44#define TX_BD_NUM (RX_BD_NUM * RX_TX_BD_RATIO)
45#define TX_BD_TI_RATIO 4
46
47/*
48 * logging macros
49 */
50#define QFEC_LOG_PR 1
51#define QFEC_LOG_DBG 2
52#define QFEC_LOG_DBG2 4
53#define QFEC_LOG_MDIO_W 8
54#define QFEC_LOG_MDIO_R 16
55
56static int qfec_debug = QFEC_LOG_PR;
57
58#ifdef QFEC_DEBUG
59# define QFEC_LOG(flag, ...) \
60 do { \
61 if (flag & qfec_debug) \
62 pr_info(__VA_ARGS__); \
63 } while (0)
64#else
65# define QFEC_LOG(flag, ...)
66#endif
67
68#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
69
70/*
71 * driver buffer-descriptor
72 * contains the 4 word HW descriptor plus an additional 4-words.
73 * (See the DSL bits in the BUS-Mode register).
74 */
75#define BD_FLAG_LAST_BD 1
76
/* software-side buffer descriptor: pairs the hardware descriptor
 * (p_desc, see struct qfec_buf_desc) with driver bookkeeping for the
 * attached sk_buff and the buffer's CPU/DMA addresses
 */
struct buf_desc {
	struct qfec_buf_desc *p_desc;	/* hardware descriptor */
	struct sk_buff *skb;		/* skb owning the data buffer */
	void *buf_virt_addr;		/* CPU (virtual) buffer address */
	void *buf_phys_addr;		/* DMA (physical) buffer address */
	uint32_t last_bd_flag;		/* non-zero on last BD of the ring */
};
84
85/*
86 *inline functions accessing non-struct qfec_buf_desc elements
87 */
88
/* trivial get/set accessors for the driver-only fields of struct buf_desc */

/* skb */
static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
{
	return p_bd->skb;
};

static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
{
	p_bd->skb = p;
};

/* virtual addr */
static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
{
	p_bd->buf_virt_addr = addr;
};

static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
{
	return p_bd->buf_virt_addr;
};

/* physical addr */
static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
{
	p_bd->buf_phys_addr = addr;
};

static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
{
	return p_bd->buf_phys_addr;
};

/* last_bd_flag */
/* returns non-zero iff this BD is marked as the last in its ring */
static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
{
	return (p_bd->last_bd_flag != 0);
};

static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
{
	p_bd->last_bd_flag = BD_FLAG_LAST_BD;
};
132
133/*
134 *inline functions accessing struct qfec_buf_desc elements
135 */
136
/* ownership bit */
/* raw BUF_OWN bit from the HW status word; non-zero presumably means
 * the BD is owned by the DMA engine (see qfec_rbd_init, which sets
 * BUF_OWN after filling an RX BD) -- confirm against qfec.h
 */
static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
{
	return p_bd->p_desc->status & BUF_OWN;
};

static inline void qfec_bd_own_set(struct buf_desc *p_bd)
{
	p_bd->p_desc->status |= BUF_OWN ;
};

static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
{
	p_bd->p_desc->status &= ~(BUF_OWN);
};

static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->status;
};

static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
{
	p_bd->p_desc->status = status;
};

/* extract the received-frame length field from the RX status word */
static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
{
	return BUF_RX_FL_GET((*p_bd->p_desc));
};

/* control register */
static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
{
	p_bd->p_desc->ctl = 0;
};

static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->ctl;
};

/* OR bits into the control word (read-modify-write) */
static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
{
	p_bd->p_desc->ctl |= val;
};

/* overwrite the whole control word */
static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
{
	p_bd->p_desc->ctl = val;
};

/* pbuf register */
static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->p_buf;
}

static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
{
	p_bd->p_desc->p_buf = p;
}

/* next register */
static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->next;
};
205
206/*
207 * initialize an RX BD w/ a new buf
208 */
/* allocate a fresh ETH_BUF_SIZE skb for an RX BD, DMA-map it for
 * device writes, program the BD control word (with end-of-ring bit on
 * the last BD) and finally hand the BD to the hardware via BUF_OWN.
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
{
	struct sk_buff *skb;
	void *p;
	void *v;

	/* allocate and record ptrs for sk buff */
	skb = dev_alloc_skb(ETH_BUF_SIZE);
	if (!skb)
		goto err;

	qfec_bd_skbuf_set(p_bd, skb);

	/* reserve the full buffer in the skb; v is its CPU address */
	v = skb_put(skb, ETH_BUF_SIZE);
	qfec_bd_virt_set(p_bd, v);

	/* NOTE(review): the dma_map_single() result is not checked with
	 * dma_mapping_error() -- confirm the mapping cannot fail here
	 */
	p = (void *) dma_map_single(&dev->dev,
		(void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
	qfec_bd_pbuf_set(p_bd, p);
	qfec_bd_phys_set(p_bd, p);

	/* populate control register */
	/* mark the last BD and set end-of-ring bit */
	qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
		(qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));

	/* hand the BD to the DMA engine last, after it is fully set up */
	qfec_bd_status_set(p_bd, BUF_OWN);

	if (!(qfec_debug & QFEC_LOG_DBG2))
		return 0;

	/* debug messages */
	QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);

	QFEC_LOG(QFEC_LOG_DBG2,
		"%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
		__func__, (void *)p_bd,
		(void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
		(void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
		(void *)p);

	return 0;

err:
	return -ENOMEM;
};
257
258/*
259 * ring structure used to maintain indices of buffer-descriptor (BD) usage
260 *
261 * The RX BDs are normally all pre-allocated with buffers available to be
262 * DMA'd into with received frames. The head indicates the first BD/buffer
263 * containing a received frame, and the tail indicates the oldest BD/buffer
264 * that needs to be restored for use. Head and tail are both initialized
265 * to zero, and n_free is initialized to zero, since all BD are initialized.
266 *
267 * The TX BDs are normally available for use, only being initialized as
268 * TX frames are requested for transmission. The head indicates the
269 * first available BD, and the tail indicate the oldest BD that has
270 * not been acknowledged as transmitted. Head and tail are both initialized
271 * to zero, and n_free is initialized to len, since all are available for use.
272 */
struct ring {
	int head;	/* next entry to be used/filled */
	int tail;	/* oldest entry awaiting service/restore */
	int n_free;	/* number of available entries */
	int len;	/* total entries in the ring */
};

/* accessory in line functions for struct ring */

/* initialize a ring; 'free' is len for TX (all BDs available) and
 * 0 for RX (all BDs pre-populated), per the comment above
 */
static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
{
	p_ring->head = p_ring->tail = 0;
	p_ring->len = size;
	p_ring->n_free = free;
}

static inline int qfec_ring_full(struct ring *p_ring)
{
	return (p_ring->n_free == 0);
}

static inline int qfec_ring_empty(struct ring *p_ring)
{
	return (p_ring->n_free == p_ring->len);
}

/* advance head (with wrap) and consume one free entry.
 * Rewritten from "p_ring->head = ++p_ring->head % p_ring->len", which
 * modified head twice without a sequence point -- undefined behavior
 * (CERT EXP30-C); same fix applied to qfec_ring_tail_adv() below.
 */
static inline void qfec_ring_head_adv(struct ring *p_ring)
{
	p_ring->head = (p_ring->head + 1) % p_ring->len;
	p_ring->n_free--;
}

/* advance tail (with wrap) and release one entry */
static inline void qfec_ring_tail_adv(struct ring *p_ring)
{
	p_ring->tail = (p_ring->tail + 1) % p_ring->len;
	p_ring->n_free++;
}

static inline int qfec_ring_head(struct ring *p_ring)
{
	return p_ring->head;
}

static inline int qfec_ring_tail(struct ring *p_ring)
{
	return p_ring->tail;
}

static inline int qfec_ring_room(struct ring *p_ring)
{
	return p_ring->n_free;
}
325
326/*
327 * counters track normal and abnormal driver events and activity
328 */
/* indices into qfec_priv.cntr[]; MUST stay in sync (order and count)
 * with cntr_name[] below.  qfec_cntrs_show() prints entry n next to
 * entry n + (cntr_last+1)/2, so the list is kept split in two halves
 * at the "half" marker.
 */
enum cntr {
	isr = 0,
	fatal_bus,

	early_tx,
	tx_no_resource,
	tx_proc_stopped,
	tx_jabber_tmout,

	xmit,
	tx_int,
	tx_isr,
	tx_owned,
	tx_underflow,

	tx_replenish,
	tx_skb_null,
	tx_timeout,
	tx_too_large,

	gmac_isr,

	/* half */
	norm_int,
	abnorm_int,

	early_rx,
	rx_buf_unavail,
	rx_proc_stopped,
	rx_watchdog,

	netif_rx_cntr,
	rx_int,
	rx_isr,
	rx_owned,
	rx_overflow,

	rx_dropped,
	rx_skb_null,
	queue_start,
	queue_stop,

	rx_paddr_nok,
	ts_ioctl,
	ts_tx_en,
	ts_tx_rtn,

	ts_rec,
	cntr_last,
};
379
/* display names for enum cntr -- MUST stay in sync with the enum above;
 * the trailing "" entry pads the table for the paired two-column
 * display in qfec_cntrs_show()
 */
static char *cntr_name[] = {
	"isr",
	"fatal_bus",

	"early_tx",
	"tx_no_resource",
	"tx_proc_stopped",
	"tx_jabber_tmout",

	"xmit",
	"tx_int",
	"tx_isr",
	"tx_owned",
	"tx_underflow",

	"tx_replenish",
	"tx_skb_null",
	"tx_timeout",
	"tx_too_large",

	"gmac_isr",

	/* half */
	"norm_int",
	"abnorm_int",

	"early_rx",
	"rx_buf_unavail",
	"rx_proc_stopped",
	"rx_watchdog",

	"netif_rx",
	"rx_int",
	"rx_isr",
	"rx_owned",
	"rx_overflow",

	"rx_dropped",
	"rx_skb_null",
	"queue_start",
	"queue_stop",

	"rx_paddr_nok",
	"ts_ioctl",
	"ts_tx_en",
	"ts_tx_rtn",

	"ts_rec",
	""
};
430
431/*
432 * private data
433 */
434
/* the single net_device instance served by this driver
 * (presumably set at probe time -- outside this chunk; verify)
 */
static struct net_device *qfec_dev;

/* bit flags kept in qfec_priv.state */
enum qfec_state {
	timestamping = 0x04,	/* HW timestamping enabled */
};
440
/* per-device private state, obtained via netdev_priv() */
struct qfec_priv {
	struct net_device *net_dev;
	struct net_device_stats stats; /* req statistics */

	struct device dev;

	spinlock_t xmit_lock;
	spinlock_t mdio_lock;

	unsigned int state; /* driver state (enum qfec_state bits) */

	unsigned int bd_size; /* buf-desc alloc size */
	struct qfec_buf_desc *bd_base; /* * qfec-buf-desc */
	dma_addr_t tbd_dma; /* dma/phy-addr buf-desc */
	dma_addr_t rbd_dma; /* dma/phy-addr buf-desc */

	struct resource *mac_res;
	void *mac_base; /* mac (virt) base address */

	struct resource *clk_res;
	void *clk_base; /* clk (virt) base address */

	struct resource *fuse_res;
	void *fuse_base; /* mac addr fuses */

	unsigned int n_tbd; /* # of TX buf-desc */
	struct ring ring_tbd; /* TX ring */
	struct buf_desc *p_tbd;
	unsigned int tx_ic_mod; /* (%) val for setting IC */

	unsigned int n_rbd; /* # of RX buf-desc */
	struct ring ring_rbd; /* RX ring */
	struct buf_desc *p_rbd;

	struct buf_desc *p_latest_rbd;
	struct buf_desc *p_ending_rbd;

	unsigned long cntr[cntr_last]; /* activity counters */

	struct mii_if_info mii; /* used by mii lib */

	int mdio_clk; /* phy mdio clock rate */
	int phy_id; /* default PHY addr (0) */
	struct timer_list phy_tmr; /* monitor PHY state */
};
486
487/*
488 * cntrs display
489 */
490
/* sysfs show: print the activity counters in two columns, pairing
 * cntr[n] with cntr[n+h] where h = (cntr_last+1)/2.
 * NOTE(review): with cntr_last even (currently 36) the largest index
 * is cntr_last-1 and this is safe; if the enum ever gains an odd
 * count, n+h can reach cntr_last and read past cntr[] -- verify when
 * adding counters.
 */
static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	int h = (cntr_last + 1) / 2;
	int l;
	int n;
	int count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	l = snprintf(&buf[0], count, "%s:\n", __func__);
	for (n = 0; n < h; n++) {
		l += snprintf(&buf[l], count - l,
			" %12lu %-16s %12lu %s\n",
			priv->cntr[n], cntr_name[n],
			priv->cntr[n+h], cntr_name[n+h]);
	}

	return l;
}
512
513# define CNTR_INC(priv, name) (priv->cntr[name]++)
514
515/*
516 * functions that manage state
517 */
/* wake the TX queue if it was stopped, counting each restart */
static inline void qfec_queue_start(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);

	if (netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
		CNTR_INC(priv, queue_start);
	}
};
527
/* stop the TX queue unconditionally, counting each stop */
static inline void qfec_queue_stop(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	CNTR_INC(priv, queue_stop);
};
535
536/*
537 * functions to access and initialize the MAC registers
538 */
/* read a 32-bit MAC register at byte offset 'reg' from mac_base */
static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
{
	return ioread32((void *) (priv->mac_base + reg));
}
543
544static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
545{
546 uint32_t addr = (uint32_t)priv->mac_base + reg;
547
548 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
549 iowrite32(val, (void *)addr);
550}
551
552/*
553 * speed/duplex/pause settings
554 */
/* sysfs show: decode MAC_CONFIG (speed/duplex/checksum-offload) and
 * FLOW_CONTROL (pause) registers into one human-readable line, with
 * the driver version appended
 */
static int qfec_config_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	int cfg = qfec_reg_read(priv, MAC_CONFIG_REG);
	int flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
	int l = 0;
	int count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	l += snprintf(&buf[l], count, "%s:", __func__);

	/* PS=0 -> 1000M; PS=1 -> FES selects 100M vs 10M */
	l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg,
		(cfg & MAC_CONFIG_REG_PS)
		? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
		cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
		cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");

	flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
	l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow,
		(flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
		: ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
		: ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));

	l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
	l += snprintf(&buf[l], count - l, "\n");
	return l;
}
584
585
586/*
587 * table and functions to initialize controller registers
588 */
589
/* one controller-register description: rdonly entries are listed for
 * display only (qfec_reg_init() skips them); writable entries carry
 * the value programmed at init
 */
struct reg_entry {
	unsigned int rdonly;	/* non-zero: display only, never written */
	unsigned int addr;	/* register byte offset */
	char *label;		/* name shown by qfec_reg_show() */
	unsigned int val;	/* init value (writable entries only) */
};

static struct reg_entry qfec_reg_tbl[] = {
	{ 0, BUS_MODE_REG, "BUS_MODE_REG", BUS_MODE_REG_DEFAULT },
	{ 0, AXI_BUS_MODE_REG, "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
	{ 0, AXI_STATUS_REG, "AXI_STATUS_REG", 0 },

	{ 0, MAC_ADR_0_HIGH_REG, "MAC_ADR_0_HIGH_REG", 0x00000302 },
	{ 0, MAC_ADR_0_LOW_REG, "MAC_ADR_0_LOW_REG", 0x01350702 },

	{ 1, RX_DES_LST_ADR_REG, "RX_DES_LST_ADR_REG", 0 },
	{ 1, TX_DES_LST_ADR_REG, "TX_DES_LST_ADR_REG", 0 },
	{ 1, STATUS_REG, "STATUS_REG", 0 },
	{ 1, DEBUG_REG, "DEBUG_REG", 0 },

	{ 0, INTRP_EN_REG, "INTRP_EN_REG", QFEC_INTRP_SETUP},

	{ 1, CUR_HOST_TX_DES_REG, "CUR_HOST_TX_DES_REG", 0 },
	{ 1, CUR_HOST_RX_DES_REG, "CUR_HOST_RX_DES_REG", 0 },
	{ 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
	{ 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },

	{ 1, MAC_FR_FILTER_REG, "MAC_FR_FILTER_REG", 0 },

	{ 0, MAC_CONFIG_REG, "MAC_CONFIG_REG", MAC_CONFIG_REG_SPD_1G
						| MAC_CONFIG_REG_DM
						| MAC_CONFIG_REG_TE
						| MAC_CONFIG_REG_RE
						| MAC_CONFIG_REG_IPC },

	{ 1, INTRP_STATUS_REG, "INTRP_STATUS_REG", 0 },
	{ 1, INTRP_MASK_REG, "INTRP_MASK_REG", 0 },

	{ 0, OPER_MODE_REG, "OPER_MODE_REG", OPER_MODE_REG_DEFAULT },

	{ 1, GMII_ADR_REG, "GMII_ADR_REG", 0 },
	{ 1, GMII_DATA_REG, "GMII_DATA_REG", 0 },

	{ 0, MMC_INTR_MASK_RX_REG, "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF },
	{ 0, MMC_INTR_MASK_TX_REG, "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF },

	{ 1, TS_HIGH_REG, "TS_HIGH_REG", 0 },
	{ 1, TS_LOW_REG, "TS_LOW_REG", 0 },

	{ 1, TS_HI_UPDT_REG, "TS_HI_UPDATE_REG", 0 },
	{ 1, TS_LO_UPDT_REG, "TS_LO_UPDATE_REG", 0 },
	{ 0, TS_SUB_SEC_INCR_REG, "TS_SUB_SEC_INCR_REG", 1 },
	{ 0, TS_CTL_REG, "TS_CTL_REG", TS_CTL_TSENALL
						| TS_CTL_TSCTRLSSR
						| TS_CTL_TSINIT
						| TS_CTL_TSENA },
};
647
648static void qfec_reg_init(struct qfec_priv *priv)
649{
650 struct reg_entry *p = qfec_reg_tbl;
651 int n = ARRAY_SIZE(qfec_reg_tbl);
652
653 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
654
655 for (; n--; p++) {
656 if (!p->rdonly)
657 qfec_reg_write(priv, p->addr, p->val);
658 }
659}
660
661/*
662 * display registers thru sysfs
663 */
/* sysfs show: dump address, offset, current value and name of every
 * register in qfec_reg_tbl[]
 */
static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	struct reg_entry *p = qfec_reg_tbl;
	int n = ARRAY_SIZE(qfec_reg_tbl);
	int l = 0;
	int count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	for (; n--; p++) {
		l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n",
			(void *)priv->mac_base + p->addr, p->addr,
			qfec_reg_read(priv, p->addr), p->label);
	}

	return l;
}
683
684/*
685 * set the MAC-0 address
686 */
687static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
688{
689 uint32_t h = 0;
690 uint32_t l = 0;
691
692 h = h << 8 | addr[5];
693 h = h << 8 | addr[4];
694
695 l = l << 8 | addr[3];
696 l = l << 8 | addr[2];
697 l = l << 8 | addr[1];
698 l = l << 8 | addr[0];
699
700 qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
701 qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l);
702
703 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
704}
705
706/*
707 * reset the controller
708 */
709
710#define QFEC_RESET_TIMEOUT 10000
711 /* reset should always clear but did not w/o test/delay
712 * in RgMii mode. there is no spec'd max timeout
713 */
714
/* issue a software reset via BUS_MODE_SWR and poll until the hardware
 * clears the bit; returns 0 on success, -ETIME after
 * QFEC_RESET_TIMEOUT polls
 */
static int qfec_hw_reset(struct qfec_priv *priv)
{
	int timeout = QFEC_RESET_TIMEOUT;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);

	while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
		if (timeout-- == 0) {
			QFEC_LOG_ERR("%s: timeout\n", __func__);
			return -ETIME;
		}

		/* there were problems resetting the controller
		 * in RGMII mode when there wasn't sufficient
		 * delay between register reads
		 */
		usleep_range(100, 200);
	}

	return 0;
}
738
739/*
740 * initialize controller
741 */
/* reset the controller, program the init register table, point the
 * DMA engine at the TX/RX descriptor lists and clear pending
 * interrupt status; returns 0 or the qfec_hw_reset() error
 */
static int qfec_hw_init(struct qfec_priv *priv)
{
	int res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	res = qfec_hw_reset(priv);
	if (res)
		return res;

	qfec_reg_init(priv);

	/* config buf-desc locations */
	qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
	qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);

	/* clear interrupts */
	/* NOTE(review): INTRP_EN_* masks are written to STATUS_REG --
	 * presumably the status bits share the same layout; verify
	 * against the register spec in qfec.h
	 */
	qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
		| INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);

	return res;
}
764
765/*
766 * en/disable controller
767 */
768static void qfec_hw_enable(struct qfec_priv *priv)
769{
770 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
771
772 qfec_reg_write(priv, OPER_MODE_REG,
773 qfec_reg_read(priv, OPER_MODE_REG)
774 | OPER_MODE_REG_ST | OPER_MODE_REG_SR);
775}
776
777static void qfec_hw_disable(struct qfec_priv *priv)
778{
779 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
780
781 qfec_reg_write(priv, OPER_MODE_REG,
782 qfec_reg_read(priv, OPER_MODE_REG)
783 & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
784}
785
786/*
787 * interface selection
788 */
/* per-interface clock/mux register values; rows of intf_config_tbl[]
 * are indexed by enum phy_intfc (mii, rgmii, revmii) in qfec_intf_sel()
 */
struct intf_config {
	uint32_t intf_sel;	/* EMAC_PHY_INTF_SEL_REG value */
	uint32_t emac_ns;	/* EMAC_NS_REG value */
	uint32_t eth_x_en_ns;	/* ETH_X_EN_NS_REG value */
	uint32_t clkmux_sel;	/* EMAC_CLKMUX_SEL_REG value */
};

#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)

static struct intf_config intf_config_tbl[] = {
	{ EMAC_PHY_INTF_SEL_MII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_RGMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
		CLKMUX_REVMII }
};
805
806/*
807 * emac clk register read and write functions
808 */
/* read a 32-bit EMAC clock-block register at byte offset 'reg' */
static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
{
	return ioread32((void *) (priv->clk_base + reg));
}
813
814static inline void qfec_clkreg_write(struct qfec_priv *priv,
815 uint32_t reg, uint32_t val)
816{
817 uint32_t addr = (uint32_t)priv->clk_base + reg;
818
819 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
820 iowrite32(val, (void *)addr);
821}
822
823/*
824 * configure the PHY interface and clock routing and signal bits
825 */
/* indices into intf_config_tbl[] */
enum phy_intfc {
	intfc_mii = 0,
	intfc_rgmii = 1,
	intfc_revmii = 2,
};

/* program the PHY interface selection and clock routing registers for
 * the requested interface; returns 0, or -ENXIO if intfc is out of
 * range for intf_config_tbl[]
 */
static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
{
	struct intf_config *p;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);

	if (intfc > intfc_revmii) {
		QFEC_LOG_ERR("%s: range\n", __func__);
		return -ENXIO;
	}

	p = &intf_config_tbl[intfc];

	qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
	qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns);
	qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns);
	qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel);

	return 0;
}
852
853/*
854 * display registers thru proc-fs
855 */
/* clock-block registers listed by qfec_clk_reg_show() (display only) */
static struct qfec_clk_reg {
	uint32_t offset;	/* byte offset from clk_base */
	char *label;		/* name shown in the sysfs dump */
} qfec_clk_regs[] = {
	{ ETH_MD_REG, "ETH_MD_REG" },
	{ ETH_NS_REG, "ETH_NS_REG" },
	{ ETH_X_EN_NS_REG, "ETH_X_EN_NS_REG" },
	{ EMAC_PTP_MD_REG, "EMAC_PTP_MD_REG" },
	{ EMAC_PTP_NS_REG, "EMAC_PTP_NS_REG" },
	{ EMAC_NS_REG, "EMAC_NS_REG" },
	{ EMAC_TX_FS_REG, "EMAC_TX_FS_REG" },
	{ EMAC_RX_FS_REG, "EMAC_RX_FS_REG" },
	{ EMAC_PHY_INTF_SEL_REG, "EMAC_PHY_INTF_SEL_REG" },
	{ EMAC_PHY_ADDR_REG, "EMAC_PHY_ADDR_REG" },
	{ EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" },
	{ EMAC_CLKMUX_SEL_REG, "EMAC_CLKMUX_SEL_REG" },
};
873
/* sysfs show: dump address, offset, current value and name of every
 * clock-block register in qfec_clk_regs[]
 */
static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	struct qfec_clk_reg *p = qfec_clk_regs;
	int n = ARRAY_SIZE(qfec_clk_regs);
	int l = 0;
	int count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	for (; n--; p++) {
		l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n",
			(void *)priv->clk_base + p->offset, p->offset,
			qfec_clkreg_read(priv, p->offset), p->label);
	}

	return l;
}
893
894/*
895 * speed selection
896 */
897
/* per-speed MAC config bits and ethernet clock M/N:D divider values;
 * rows of qfec_pll_cfg_tbl[] are indexed by enum speed below
 */
struct qfec_pll_cfg {
	uint32_t spd;		/* MAC_CONFIG_REG speed bits */
	uint32_t eth_md;	/* M [31:16], NOT 2*D [15:0] */
	uint32_t eth_ns;	/* NOT(M-N) [31:16], ctl bits [11:0] */
};

static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
	/* 2.5 MHz */
	{ MAC_CONFIG_REG_SPD_10, ETH_MD_M(1) | ETH_MD_2D_N(100),
		ETH_NS_NM(100-1)
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC },
	/* 25 MHz */
	{ MAC_CONFIG_REG_SPD_100, ETH_MD_M(1) | ETH_MD_2D_N(10),
		ETH_NS_NM(10-1)
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC },
	/* 125 MHz */
	{MAC_CONFIG_REG_SPD_1G, 0, ETH_NS_PRE_DIV(1)
		| CLK_SRC_PLL_EMAC },
};

/* indices into qfec_pll_cfg_tbl[] */
enum speed {
	spd_10 = 0,
	spd_100 = 1,
	spd_1000 = 2,
};
929
930/*
931 * configure the PHY interface and clock routing and signal bits
932 */
/* program the MAC speed/duplex bits and the ethernet clock dividers
 * for the given speed (enum speed) and duplex (non-zero = full);
 * returns 0, or -ENODEV if spd is out of range
 */
static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
	unsigned int dplx)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct qfec_pll_cfg *p;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);

	if (spd > spd_1000) {
		QFEC_LOG_ERR("%s: range\n", __func__);
		return -ENODEV;
	}

	p = &qfec_pll_cfg_tbl[spd];

	/* set the MAC speed bits */
	qfec_reg_write(priv, MAC_CONFIG_REG,
	(qfec_reg_read(priv, MAC_CONFIG_REG)
		& ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
			| p->spd | (dplx ? MAC_CONFIG_REG_DM : 0));

	qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);

	return 0;
}
959
960/*
961 * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
962 */
963
static struct qfec_pll_cfg qfec_pll_ptp = {
	/* 19.2 MHz tcxo */
	0, 0, ETH_NS_PRE_DIV(0)
		| EMAC_PTP_NS_ROOT_EN
		| EMAC_PTP_NS_CLK_EN
		| CLK_SRC_TCXO
};

/* debug clock-test register offsets/values (see qfec_ptp_cfg) */
#define PLLTEST_PAD_CFG 0x01E0
#define PLLTEST_PLL_7 0x3700

#define CLKTEST_REG 0x01EC
#define CLKTEST_EMAC_RX 0x3fc07f7a

/* program the PTP clock source/dividers from qfec_pll_ptp and route
 * the clocks to the HS/LS test ports for verification; always
 * returns 0
 */
static int qfec_ptp_cfg(struct qfec_priv *priv)
{
	struct qfec_pll_cfg *p = &qfec_pll_ptp;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
		__func__, p->eth_md, p->eth_ns);

	qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);

	/* configure HS/LS clk test ports to verify clks */
	qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX);
	qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);

	return 0;
}
994
995/*
996 * MDIO operations
997 */
998
999/*
1000 * wait reasonable amount of time for MDIO operation to complete, not busy
1001 */
1002static int qfec_mdio_busy(struct net_device *dev)
1003{
1004 int i;
1005
1006 for (i = 100; i > 0; i--) {
1007 if (!(qfec_reg_read(
1008 netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) {
1009 return 0;
1010 }
1011 udelay(1);
1012 }
1013
1014 return -ETIME;
1015}
1016
1017/*
1018 * initiate either a read or write MDIO operation
1019 */
1020
/* start one MDIO transaction (wr non-zero = write, else read) on the
 * given phy/register and wait for it to complete; returns 0 or -ETIME
 * from qfec_mdio_busy().  Callers must hold mdio_lock (see
 * qfec_mdio_read/qfec_mdio_write).
 */
static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
{
	struct qfec_priv *priv = netdev_priv(dev);
	int res = 0;

	/* insure phy not busy */
	res = qfec_mdio_busy(dev);
	if (res) {
		QFEC_LOG_ERR("%s: busy\n", __func__);
		goto done;
	}

	/* initiate operation */
	qfec_reg_write(priv, GMII_ADR_REG,
		GMII_ADR_REG_ADR_SET(phy_id)
		| GMII_ADR_REG_REG_SET(reg)
		| GMII_ADR_REG_CSR_SET(priv->mdio_clk)
		| (wr ? GMII_ADR_REG_GW : 0)
		| GMII_ADR_REG_GB);

	/* wait for operation to complete */
	res = qfec_mdio_busy(dev);
	if (res)
		QFEC_LOG_ERR("%s: timeout\n", __func__);

done:
	return res;
}
1049
1050/*
1051 * read MDIO register
1052 */
/* read an MDIO register under mdio_lock.
 * Returns the 16-bit register value, or the negative error from
 * qfec_mdio_oper() -- note the two are returned through the same int,
 * so a caller cannot distinguish an error from a register value that
 * happens to match it.
 */
static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct qfec_priv *priv = netdev_priv(dev);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->mdio_lock, flags);

	res = qfec_mdio_oper(dev, phy_id, reg, 0);
	if (res) {
		QFEC_LOG_ERR("%s: oper\n", __func__);
		goto done;
	}

	res = qfec_reg_read(priv, GMII_DATA_REG);
	QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
		__func__, reg, res);

done:
	spin_unlock_irqrestore(&priv->mdio_lock, flags);
	return res;
}
1075
1076/*
1077 * write MDIO register
1078 */
/* write an MDIO register under mdio_lock; failures are logged but not
 * reported to the caller (void return)
 */
static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
	int val)
{
	struct qfec_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->mdio_lock, flags);

	QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
		__func__, reg, val);

	/* data must be staged in GMII_DATA before starting the write op */
	qfec_reg_write(priv, GMII_DATA_REG, val);

	if (qfec_mdio_oper(dev, phy_id, reg, 1))
		QFEC_LOG_ERR("%s: oper\n", __func__);

	spin_unlock_irqrestore(&priv->mdio_lock, flags);
}
1097
1098/*
1099 * get auto-negotiation results
1100 */
1101
1102#define QFEC_100 (LPA_100HALF | LPA_100FULL | LPA_100HALF)
1103#define QFEC_100_FD (LPA_100FULL | LPA_100BASE4)
1104#define QFEC_10 (LPA_10HALF | LPA_10FULL)
1105#define QFEC_10_FD LPA_10FULL
1106
1107static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
1108{
1109 struct qfec_priv *priv = netdev_priv(dev);
1110 uint32_t status;
1111 uint32_t advert;
1112 uint32_t lpa;
1113 uint32_t flow;
1114
1115 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
1116 lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
1117 status = advert & lpa;
1118
1119 /* todo: check extended status register for 1G abilities */
1120
1121 if (status & QFEC_100) {
1122 *spd = spd_100;
1123 *dplx = status & QFEC_100_FD ? 1 : 0;
1124 }
1125
1126 else if (status & QFEC_10) {
1127 *spd = spd_10;
1128 *dplx = status & QFEC_10_FD ? 1 : 0;
1129 }
1130
1131 /* check pause */
1132 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
1133 flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);
1134
1135 if (status & ADVERTISE_PAUSE_CAP) {
1136 flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
1137 } else if (status & ADVERTISE_PAUSE_ASYM) {
1138 if (lpa & ADVERTISE_PAUSE_CAP)
1139 flow |= FLOW_CONTROL_TFE;
1140 else if (advert & ADVERTISE_PAUSE_CAP)
1141 flow |= FLOW_CONTROL_RFE;
1142 }
1143
1144 qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
1145}
1146
1147/*
1148 * monitor phy status, and process auto-neg results when changed
1149 */
1150
/* timer callback: re-arms itself every HZ, then applies auto-neg
 * results and reports carrier-on when the link comes up, or reports
 * carrier-off when it goes down
 */
static void qfec_phy_monitor(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct qfec_priv *priv = netdev_priv(dev);
	unsigned int spd = 0;
	unsigned int dplx = 1;	/* defaults used if qfec_get_an() finds no match */

	mod_timer(&priv->phy_tmr, jiffies + HZ);

	if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) {
		qfec_get_an(dev, &spd, &dplx);
		qfec_speed_cfg(dev, spd, dplx);
		QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
			__func__, spd, dplx);

		netif_carrier_on(dev);
	}

	else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) {
		QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
		netif_carrier_off(dev);
	}
}
1174
1175/*
1176 * dealloc buffer descriptor memory
1177 */
1178
1179static void qfec_mem_dealloc(struct net_device *dev)
1180{
1181 struct qfec_priv *priv = netdev_priv(dev);
1182
1183 dma_free_coherent(&dev->dev,
1184 priv->bd_size, priv->bd_base, priv->tbd_dma);
1185 priv->bd_base = 0;
1186}
1187
1188/*
1189 * allocate shared device memory for TX/RX buf-desc (and buffers)
1190 */
1191
1192static int qfec_mem_alloc(struct net_device *dev)
1193{
1194 struct qfec_priv *priv = netdev_priv(dev);
1195
1196 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1197
1198 priv->bd_size =
1199 (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
1200
1201 priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
1202 if (!priv->p_tbd) {
1203 QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
1204 return -ENOMEM;
1205 }
1206
1207 priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
1208 if (!priv->p_rbd) {
1209 QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
1210 return -ENOMEM;
1211 }
1212
1213 /* alloc mem for buf-desc, if not already alloc'd */
1214 if (!priv->bd_base) {
1215 priv->bd_base = dma_alloc_coherent(&dev->dev,
1216 priv->bd_size, &priv->tbd_dma,
1217 GFP_KERNEL | __GFP_DMA);
1218 }
1219
1220 if (!priv->bd_base) {
1221 QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
1222 return -ENOMEM;
1223 }
1224
1225 priv->rbd_dma = priv->tbd_dma
1226 + (priv->n_tbd * sizeof(struct qfec_buf_desc));
1227
1228 QFEC_LOG(QFEC_LOG_DBG,
1229 " %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
1230 __func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
1231
1232 return 0;
1233}
1234
1235/*
1236 * display buffer descriptors
1237 */
1238
/* format one buf_desc (HW status/ctl/pbuf/next plus driver skb/virt/
 * phys/last fields) into buf; returns the snprintf() result, i.e. the
 * would-be length, which may exceed 'size' on truncation
 */
static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
{
	return snprintf(buf, size,
		"%8p: %08x %08x %8p %8p %8p %8p %8p %x",
		p_bd, qfec_bd_status_get(p_bd),
		qfec_bd_ctl_get(p_bd), qfec_bd_pbuf_get(p_bd),
		qfec_bd_next_get(p_bd), qfec_bd_skbuf_get(p_bd),
		qfec_bd_virt_get(p_bd), qfec_bd_phys_get(p_bd),
		qfec_bd_last_bd(p_bd));
}
1249
1250static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
1251 struct ring *p_ring, char *label)
1252{
1253 int l = 0;
1254 int n;
1255
1256 QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
1257
1258 l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
1259 if (!p_bd)
1260 return l;
1261
1262 n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
1263
1264 for (n = 0; n < n_bd; n++, p_bd++) {
1265 l += qfec_bd_fmt(&buf[l], count - l, p_bd);
1266 l += snprintf(&buf[l], count - l, "%s%s\n",
1267 (qfec_ring_head(p_ring) == n ? " < h" : ""),
1268 (qfec_ring_tail(p_ring) == n ? " < t" : ""));
1269 }
1270
1271 return l;
1272}
1273
1274/*
1275 * display TX BDs
1276 */
1277static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
1278 char *buf)
1279{
1280 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1281 int count = PAGE_SIZE;
1282
1283 return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
1284 &priv->ring_tbd, "TX");
1285}
1286
1287/*
1288 * display RX BDs
1289 */
1290static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
1291 char *buf)
1292{
1293 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1294 int count = PAGE_SIZE;
1295
1296 return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
1297 &priv->ring_rbd, "RX");
1298}
1299
1300/*
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001301 * process timestamp values
1302 * The pbuf and next fields of the buffer descriptors are overwritten
1303 * with the timestamp high and low register values.
1304 *
1305 * The low register is incremented by the value in the subsec_increment
1306 * register and overflows at 0x8000 0000 causing the high register to
1307 * increment.
1308 *
1309 * The subsec_increment register is recommended to be set to the number
1310 * of nanosec corresponding to each clock tic, scaled by 2^31 / 10^9
 * (e.g. 40 * 2^31 / 10^9 = 85.9, or 86 for 25 MHz).  However, the
1312 * rounding error in this case will result in a 1 sec error / ~14 mins.
1313 *
1314 * An alternate approach is used. The subsec_increment is set to 1,
1315 * and the concatenation of the 2 timestamp registers used to count
1316 * clock tics. The 63-bit result is manipulated to determine the number
1317 * of sec and ns.
1318 */
1319
1320/*
1321 * convert 19.2 MHz clock tics into sec/ns
1322 */
1323#define TS_LOW_REG_BITS 31
1324
1325#define MILLION 1000000UL
1326#define BILLION 1000000000UL
1327
1328#define F_CLK 19200000UL
1329#define F_CLK_PRE_SC 24
1330#define F_CLK_INV_Q 56
1331#define F_CLK_INV (((unsigned long long)1 << F_CLK_INV_Q) / F_CLK)
1332#define F_CLK_TO_NS_Q 25
1333#define F_CLK_TO_NS \
1334 (((((unsigned long long)1<<F_CLK_TO_NS_Q)*BILLION)+(F_CLK-1))/F_CLK)
1335#define US_TO_F_CLK_Q 20
1336#define US_TO_F_CLK \
1337 (((((unsigned long long)1<<US_TO_F_CLK_Q)*F_CLK)+(MILLION-1))/MILLION)
1338
/* split a 63-bit count of 19.2 MHz clock ticks (*cnt) into whole
 * seconds (*sec) and nanoseconds (*ns) using only fixed-point
 * multiplies and shifts (no 64-bit division) */
static inline void qfec_get_sec(uint64_t *cnt,
	uint32_t *sec, uint32_t *ns)
{
	unsigned long long t;
	unsigned long long subsec;

	/* sec ~= cnt / F_CLK via the Q56 reciprocal F_CLK_INV; the
	 * count is pre-scaled down by 2^F_CLK_PRE_SC so the 64-bit
	 * multiply cannot overflow */
	t = *cnt >> F_CLK_PRE_SC;
	t *= F_CLK_INV;
	t >>= F_CLK_INV_Q - F_CLK_PRE_SC;
	*sec = t;

	/* remainder, in clock ticks */
	t = *cnt - (t * F_CLK);
	subsec = t;

	/* the truncated reciprocal can underestimate the quotient by
	 * one, leaving a remainder >= F_CLK; correct for it */
	if (subsec >= F_CLK) {
		subsec -= F_CLK;
		*sec += 1;
	}

	/* ticks -> ns via the Q25 scale factor F_CLK_TO_NS */
	subsec *= F_CLK_TO_NS;
	subsec >>= F_CLK_TO_NS_Q;
	*ns = subsec;
}
1362
1363/*
1364 * read ethernet timestamp registers, pass up raw register values
1365 * and values converted to sec/ns
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001366 */
1367static void qfec_read_timestamp(struct buf_desc *p_bd,
1368 struct skb_shared_hwtstamps *ts)
1369{
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001370 unsigned long long cnt;
1371 unsigned int sec;
1372 unsigned int subsec;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001373
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001374 cnt = (unsigned long)qfec_bd_next_get(p_bd);
1375 cnt <<= TS_LOW_REG_BITS;
1376 cnt |= (unsigned long)qfec_bd_pbuf_get(p_bd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001377
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001378 /* report raw counts as concatenated 63 bits */
1379 sec = cnt >> 32;
1380 subsec = cnt & 0xffffffff;
1381
1382 ts->hwtstamp = ktime_set(sec, subsec);
1383
1384 /* translate counts to sec and ns */
1385 qfec_get_sec(&cnt, &sec, &subsec);
1386
1387 ts->syststamp = ktime_set(sec, subsec);
1388}
1389
1390/*
1391 * capture the current system time in the timestamp registers
1392 */
1393static int qfec_cmd(struct device *dev, struct device_attribute *attr,
1394 const char *buf, size_t count)
1395{
1396 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1397 struct timeval tv;
1398
1399 if (!strncmp(buf, "setTs", 5)) {
1400 unsigned long long cnt;
1401 uint32_t ts_hi;
1402 uint32_t ts_lo;
1403 unsigned long long subsec;
1404
1405 do_gettimeofday(&tv);
1406
1407 /* convert raw sec/usec to ns */
1408 subsec = tv.tv_usec;
1409 subsec *= US_TO_F_CLK;
1410 subsec >>= US_TO_F_CLK_Q;
1411
1412 cnt = tv.tv_sec;
1413 cnt *= F_CLK;
1414 cnt += subsec;
1415
1416 ts_hi = cnt >> 31;
1417 ts_lo = cnt & 0x7FFFFFFF;
1418
1419 qfec_reg_write(priv, TS_HI_UPDT_REG, ts_hi);
1420 qfec_reg_write(priv, TS_LO_UPDT_REG, ts_lo);
1421
1422 qfec_reg_write(priv, TS_CTL_REG,
1423 qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSINIT);
1424 } else
1425 pr_err("%s: unknown cmd, %s.\n", __func__, buf);
1426
1427 return strnlen(buf, count);
1428}
1429
1430/*
1431 * display ethernet tstamp and system time
1432 */
1433static int qfec_tstamp_show(struct device *dev, struct device_attribute *attr,
1434 char *buf)
1435{
1436 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1437 int count = PAGE_SIZE;
1438 int l;
1439 struct timeval tv;
1440 unsigned long long cnt;
1441 uint32_t sec;
1442 uint32_t ns;
1443 uint32_t ts_hi;
1444 uint32_t ts_lo;
1445
1446 /* insure that ts_hi didn't increment during read */
1447 do {
1448 ts_hi = qfec_reg_read(priv, TS_HIGH_REG);
1449 ts_lo = qfec_reg_read(priv, TS_LOW_REG);
1450 } while (ts_hi != qfec_reg_read(priv, TS_HIGH_REG));
1451
1452 cnt = ts_hi;
1453 cnt <<= TS_LOW_REG_BITS;
1454 cnt |= ts_lo;
1455
1456 do_gettimeofday(&tv);
1457
1458 ts_hi = cnt >> 32;
1459 ts_lo = cnt & 0xffffffff;
1460
1461 qfec_get_sec(&cnt, &sec, &ns);
1462
1463 l = snprintf(buf, count,
1464 "%12u.%09u sec 0x%08x 0x%08x tstamp %12u.%06u time-of-day\n",
1465 sec, ns, ts_hi, ts_lo, (int)tv.tv_sec, (int)tv.tv_usec);
1466
1467 return l;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001468}
1469
/*
 * free transmitted skbufs from buffer-descriptors no longer owned by
 * HW: walk the TX ring from the tail, return timestamps where they
 * were requested, account the packet, unmap and free each skb, then
 * restart the queue.  Serialized against qfec_xmit() via xmit_lock.
 */
static int qfec_tx_replenish(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	struct sk_buff *skb;
	unsigned long flags;

	CNTR_INC(priv, tx_replenish);

	spin_lock_irqsave(&priv->xmit_lock, flags);

	while (!qfec_ring_empty(p_ring)) {
		/* stop at the first descriptor DMA still owns */
		if (qfec_bd_own(p_bd))
			break; /* done for now */

		skb = qfec_bd_skbuf_get(p_bd);
		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, tx_skb_null);
			break;
		}

		/* ack the TX-complete/buf-unavailable status bits */
		qfec_reg_write(priv, STATUS_REG,
			STATUS_REG_TU | STATUS_REG_TI);

		/* retrieve timestamp if requested */
		if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) {
			CNTR_INC(priv, ts_tx_rtn);
			qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			skb_tstamp_tx(skb, skb_hwtstamps(skb));
		}

		/* update statistics before freeing skb */
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;

		dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
			skb->len, DMA_TO_DEVICE);

		dev_kfree_skb_any(skb);
		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_tail_adv(p_ring);
		p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	}

	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	/* ring space was freed (or none was pending); let xmit resume */
	qfec_queue_start(dev);

	return 0;
}
1526
1527/*
1528 * clear ownership bits of all TX buf-desc and release the sk-bufs
1529 */
1530static void qfec_tx_timeout(struct net_device *dev)
1531{
1532 struct qfec_priv *priv = netdev_priv(dev);
1533 struct buf_desc *bd = priv->p_tbd;
1534 int n;
1535
1536 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1537 CNTR_INC(priv, tx_timeout);
1538
1539 for (n = 0; n < priv->n_tbd; n++, bd++)
1540 qfec_bd_own_clr(bd);
1541
1542 qfec_tx_replenish(dev);
1543}
1544
/*
 * rx() - process received frames
 *	walk the RX ring from the last descriptor processed, hand good
 *	frames to the stack via netif_rx(), then replenish the ring
 *	with fresh buffers.  Called from the ISR - no sleeping.
 */
static void qfec_rx_int(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_rbd;
	struct buf_desc *p_bd = priv->p_latest_rbd;
	uint32_t desc_status;
	uint32_t mis_fr_reg;

	desc_status = qfec_bd_status_get(p_bd);
	mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);

	CNTR_INC(priv, rx_int);

	/* check that valid interrupt occurred */
	if (unlikely(desc_status & BUF_OWN)) {
		char s[100];

		qfec_bd_fmt(s, sizeof(s), p_bd);
		QFEC_LOG_ERR("%s: owned by DMA, %08x, %s\n", __func__,
			qfec_reg_read(priv, CUR_HOST_RX_DES_REG), s);
		CNTR_INC(priv, rx_owned);
		return;
	}

	/* accumulate missed-frame count (reg reset when read) */
	priv->stats.rx_missed_errors += mis_fr_reg
		& MIS_FR_REG_MISS_CNT;

	/* process all unowned frames */
	while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) {
		struct sk_buff *skb;
		struct buf_desc *p_bd_next;

		skb = qfec_bd_skbuf_get(p_bd);

		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, rx_skb_null);
			break;
		}

		/* cache coherency before skb->data is accessed */
		dma_unmap_single(&dev->dev,
			(dma_addr_t) qfec_bd_phys_get(p_bd),
			ETH_BUF_SIZE, DMA_FROM_DEVICE);
		prefetch(skb->data);

		if (unlikely(desc_status & BUF_RX_ES)) {
			/* error-summary bit set: drop the frame */
			priv->stats.rx_dropped++;
			CNTR_INC(priv, rx_dropped);
			dev_kfree_skb(skb);
		} else {
			/* ack the RX interrupt for this frame */
			qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);

			/* frame length comes from the status word */
			skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);

			if (priv->state & timestamping) {
				CNTR_INC(priv, ts_rec);
				qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			}

			/* update statistics before freeing skb */
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += skb->len;

			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			if (NET_RX_DROP == netif_rx(skb)) {
				priv->stats.rx_dropped++;
				CNTR_INC(priv, rx_dropped);
			}
			CNTR_INC(priv, netif_rx_cntr);
		}

		/* advance, wrapping at the end of the ring; peek at the
		 * next descriptor's status before releasing this one */
		if (p_bd != priv->p_ending_rbd)
			p_bd_next = p_bd + 1;
		else
			p_bd_next = priv->p_rbd;
		desc_status = qfec_bd_status_get(p_bd_next);

		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_head_adv(p_ring);
		p_bd = p_bd_next;
	}

	priv->p_latest_rbd = p_bd;

	/* replenish bufs */
	while (!qfec_ring_empty(p_ring)) {
		if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
			break;
		qfec_ring_tail_adv(p_ring);
	}
}
1645
1646/*
1647 * isr() - interrupt service routine
1648 * determine cause of interrupt and invoke/schedule appropriate
1649 * processing or error handling
1650 */
/* bump counter 'cntr' when 'interrupt' is set in 'status'; wrapped in
 * do/while(0) so the macro is safe inside if/else chains (the bare
 * 'if' form was a dangling-else hazard), with arguments parenthesized */
#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
	do { \
		if ((status) & (interrupt)) \
			CNTR_INC(priv, cntr); \
	} while (0)
1654
static irqreturn_t qfec_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t status = qfec_reg_read(priv, STATUS_REG);
	uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);

	/* abnormal interrupt */
	if (status & STATUS_REG_AIS) {
		QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
			__func__, status);

		/* count each error source for the cntrs sysfs node */
		ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail);
		ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);

		ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
		ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
		ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);

		ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
		ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
		ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);

		int_bits |= STATUS_REG_AIS_BITS;
		CNTR_INC(priv, abnorm_int);
	}

	if (status & STATUS_REG_NIS)
		CNTR_INC(priv, norm_int);

	/* receive interrupt */
	if (status & STATUS_REG_RI) {
		CNTR_INC(priv, rx_isr);
		qfec_rx_int(dev);
	}

	/* transmit interrupt */
	if (status & STATUS_REG_TI) {
		CNTR_INC(priv, tx_isr);
		qfec_tx_replenish(dev);
	}

	/* gmac interrupt: only counted; ack via the GMI bit below */
	if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) {
		CNTR_INC(priv, gmac_isr);
		int_bits |= STATUS_REG_GMI;
	}

	/* clear interrupts */
	qfec_reg_write(priv, STATUS_REG, int_bits);
	CNTR_INC(priv, isr);

	/* NOTE(review): IRQ_HANDLED is returned even when no status bit
	 * was set - confirm this interrupt line is not shared */
	return IRQ_HANDLED;
}
1711
1712/*
1713 * open () - register system resources (IRQ, DMA, ...)
1714 * turn on HW, perform device setup.
1715 */
1716static int qfec_open(struct net_device *dev)
1717{
1718 struct qfec_priv *priv = netdev_priv(dev);
1719 struct buf_desc *p_bd;
1720 struct ring *p_ring;
1721 struct qfec_buf_desc *p_desc;
1722 int n;
1723 int res = 0;
1724
1725 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1726
1727 if (!dev) {
1728 res = -EINVAL;
1729 goto err;
1730 }
1731
1732 /* allocate TX/RX buffer-descriptors and buffers */
1733
1734 res = qfec_mem_alloc(dev);
1735 if (res)
1736 goto err;
1737
1738 /* initialize TX */
1739 p_desc = priv->bd_base;
1740
1741 for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
1742 p_bd->p_desc = p_desc++;
1743
1744 if (n == (priv->n_tbd - 1))
1745 qfec_bd_last_bd_set(p_bd);
1746
1747 qfec_bd_own_clr(p_bd); /* clear ownership */
1748 }
1749
1750 qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);
1751
1752 priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
1753 if (priv->tx_ic_mod == 0)
1754 priv->tx_ic_mod = 1;
1755
1756 /* initialize RX buffer descriptors and allocate sk_bufs */
1757 p_ring = &priv->ring_rbd;
1758 qfec_ring_init(p_ring, priv->n_rbd, 0);
1759 qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);
1760
1761 for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
1762 p_bd->p_desc = p_desc++;
1763
1764 if (qfec_rbd_init(dev, p_bd))
1765 break;
1766 qfec_ring_tail_adv(p_ring);
1767 }
1768
1769 priv->p_latest_rbd = priv->p_rbd;
1770 priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;
1771
1772 /* config ptp clock */
1773 qfec_ptp_cfg(priv);
1774
1775 /* configure PHY - must be set before reset/hw_init */
1776 qfec_intf_sel(priv, intfc_mii);
1777
1778 /* initialize controller after BDs allocated */
1779 res = qfec_hw_init(priv);
1780 if (res)
1781 goto err1;
1782
1783 /* get/set (primary) MAC address */
1784 qfec_set_adr_regs(priv, dev->dev_addr);
1785
1786 /* start phy monitor */
1787 QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
1788 netif_carrier_off(priv->net_dev);
1789 setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
1790 mod_timer(&priv->phy_tmr, jiffies + HZ);
1791
1792 /* initialize interrupts */
1793 QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
1794 res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
1795 if (res)
1796 goto err1;
1797
1798 /* enable controller */
1799 qfec_hw_enable(priv);
1800 netif_start_queue(dev);
1801
1802 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
1803 mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));
1804
1805 QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
1806 return 0;
1807
1808err1:
1809 qfec_mem_dealloc(dev);
1810err:
1811 QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
1812 return res;
1813}
1814
1815/*
1816 * stop() - "reverse operations performed at open time"
1817 */
1818static int qfec_stop(struct net_device *dev)
1819{
1820 struct qfec_priv *priv = netdev_priv(dev);
1821 struct buf_desc *p_bd;
1822 struct sk_buff *skb;
1823 int n;
1824
1825 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1826
1827 del_timer_sync(&priv->phy_tmr);
1828
1829 qfec_hw_disable(priv);
1830 qfec_queue_stop(dev);
1831 free_irq(dev->irq, dev);
1832
1833 /* free all pending sk_bufs */
1834 for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
1835 skb = qfec_bd_skbuf_get(p_bd);
1836 if (skb)
1837 dev_kfree_skb(skb);
1838 }
1839
1840 for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
1841 skb = qfec_bd_skbuf_get(p_bd);
1842 if (skb)
1843 dev_kfree_skb(skb);
1844 }
1845
1846 qfec_mem_dealloc(dev);
1847
1848 QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
1849
1850 return 0;
1851}
1852
/* .ndo_set_config stub: interface-map changes (I/O address, IRQ) are
 * not supported; accept and ignore so the ioctl does not fail */
static int qfec_set_config(struct net_device *dev, struct ifmap *map)
{
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
	return 0;
}
1858
/*
 * pass data from skbuf to buf-desc
 *	single-descriptor frames only (first+last segment set below, no
 *	scatter/gather); serialized against qfec_tx_replenish() via
 *	xmit_lock.
 */
static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd;
	uint32_t ctrl = 0;
	int ret = NETDEV_TX_OK;
	unsigned long flags;

	CNTR_INC(priv, xmit);

	spin_lock_irqsave(&priv->xmit_lock, flags);

	/* stop queuing if no resources available */
	if (qfec_ring_room(p_ring) == 0) {
		qfec_queue_stop(dev);
		CNTR_INC(priv, tx_no_resource);

		ret = NETDEV_TX_BUSY;
		goto done;
	}

	/* locate and save *sk_buff */
	p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
	qfec_bd_skbuf_set(p_bd, skb);

	/* set DMA ptr to sk_buff data and write cache to memory */
	qfec_bd_pbuf_set(p_bd, (void *)
	dma_map_single(&dev->dev,
		(void *)skb->data, skb->len, DMA_TO_DEVICE));

	/* request a completion interrupt only every tx_ic_mod frames */
	ctrl = skb->len;
	if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
		ctrl |= BUF_TX_IC; /* interrupt on complete */

	/* check if timestamping enabled and requested */
	if (priv->state & timestamping) {
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
			CNTR_INC(priv, ts_tx_en);
			ctrl |= BUF_TX_IC; /* interrupt on complete */
			ctrl |= BUF_TX_TTSE; /* enable timestamp */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
	}

	/* NOTE(review): RX end-of-ring flag applied to a TX descriptor -
	 * presumably the TX TER bit shares this bit position; confirm
	 * against the definitions in qfec.h */
	if (qfec_bd_last_bd(p_bd))
		ctrl |= BUF_RX_RER;

	/* no gather, no multi buf frames */
	ctrl |= BUF_TX_FS | BUF_TX_LS; /* 1st and last segment */

	/* write control word first, then hand ownership to the DMA */
	qfec_bd_ctl_wr(p_bd, ctrl);
	qfec_bd_status_set(p_bd, BUF_OWN);

	qfec_ring_head_adv(p_ring);
	qfec_reg_write(priv, TX_POLL_DEM_REG, 1); /* poll */

done:
	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	return ret;
}
1924
/* .ndo_do_ioctl: implements SIOCSHWTSTAMP (unconditionally enables HW
 * timestamping on all TX and RX frames); every other command is
 * delegated to the generic MII ioctl handler */
static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qfec_priv *priv = netdev_priv(dev);
	/* NOTE(review): SIOCSHWTSTAMP normally carries its
	 * hwtstamp_config behind ifr->ifr_data in user memory, which
	 * requires copy_from_user/copy_to_user; casting the ifreq
	 * itself looks wrong - confirm against the timestamping docs */
	struct hwtstamp_config *cfg = (struct hwtstamp_config *) ifr;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	if (cmd == SIOCSHWTSTAMP) {
		CNTR_INC(priv, ts_ioctl);
		QFEC_LOG(QFEC_LOG_DBG,
			"%s: SIOCSHWTSTAMP - %x flags  %x tx  %x rx\n",
			__func__, cfg->flags, cfg->tx_type, cfg->rx_filter);

		/* report back what is actually implemented */
		cfg->flags = 0;
		cfg->tx_type = HWTSTAMP_TX_ON;
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;

		priv->state |= timestamping;
		qfec_reg_write(priv, TS_CTL_REG,
			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);

		return 0;
	}

	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
}
1951
1952static struct net_device_stats *qfec_get_stats(struct net_device *dev)
1953{
1954 struct qfec_priv *priv = netdev_priv(dev);
1955
1956 QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
1957
1958 return &priv->stats;
1959}
1960
1961/*
1962 * accept new mac address
1963 */
1964static int qfec_set_mac_address(struct net_device *dev, void *p)
1965{
1966 struct qfec_priv *priv = netdev_priv(dev);
1967 struct sockaddr *addr = p;
1968
1969 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1970
1971 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1972
1973 qfec_set_adr_regs(priv, dev->dev_addr);
1974
1975 return 0;
1976}
1977
1978/*
1979 * read discontinuous MAC address from corrected fuse memory region
1980 */
1981
1982static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
1983{
1984 static int offset[] = { 0, 1, 2, 3, 4, 8 };
1985 int n;
1986
1987 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1988
1989 for (n = 0; n < nBytes; n++)
1990 buf[n] = ioread8(mac_base + offset[n]);
1991
1992 /* check that MAC programmed */
1993 if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) {
1994 QFEC_LOG_ERR("%s: null MAC address\n", __func__);
1995 return -ENODATA;
1996 }
1997
1998 return 0;
1999}
2000
/*
 * static definition of driver functions
 */
static const struct net_device_ops qfec_netdev_ops = {
	/* lifecycle and data path */
	.ndo_open               = qfec_open,
	.ndo_stop               = qfec_stop,
	.ndo_start_xmit         = qfec_xmit,

	/* control operations */
	.ndo_do_ioctl           = qfec_do_ioctl,
	.ndo_tx_timeout         = qfec_tx_timeout,
	.ndo_set_mac_address    = qfec_set_mac_address,

	/* generic ethernet helpers */
	.ndo_change_mtu         = eth_change_mtu,
	.ndo_validate_addr      = eth_validate_addr,

	.ndo_get_stats          = qfec_get_stats,
	.ndo_set_config         = qfec_set_config,
};
2019
2020/*
2021 * ethtool functions
2022 */
2023
2024static int qfec_nway_reset(struct net_device *dev)
2025{
2026 struct qfec_priv *priv = netdev_priv(dev);
2027 return mii_nway_restart(&priv->mii);
2028}
2029
2030/*
2031 * speed, duplex, auto-neg settings
2032 */
2033static void qfec_ethtool_getpauseparam(struct net_device *dev,
2034 struct ethtool_pauseparam *pp)
2035{
2036 struct qfec_priv *priv = netdev_priv(dev);
2037 u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
2038 u32 advert;
2039
2040 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2041
2042 /* report current settings */
2043 pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
2044 pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
2045
2046 /* report if pause is being advertised */
2047 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
2048 pp->autoneg =
2049 (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2050}
2051
2052static int qfec_ethtool_setpauseparam(struct net_device *dev,
2053 struct ethtool_pauseparam *pp)
2054{
2055 struct qfec_priv *priv = netdev_priv(dev);
2056 u32 advert;
2057
2058 QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
2059 pp->autoneg, pp->rx_pause, pp->tx_pause);
2060
2061 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
2062 advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2063
2064 /* If pause autonegotiation is enabled, but both rx and tx are not
2065 * because neither was specified in the ethtool cmd,
2066 * enable both symetrical and asymetrical pause.
2067 * otherwise, only enable the pause mode indicated by rx/tx.
2068 */
2069 if (pp->autoneg) {
2070 if (pp->rx_pause)
2071 advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
2072 else if (pp->tx_pause)
2073 advert |= ADVERTISE_PAUSE_ASYM;
2074 else
2075 advert |= ADVERTISE_PAUSE_CAP;
2076 }
2077
2078 qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);
2079
2080 return 0;
2081}
2082
2083/*
2084 * ethtool ring parameter (-g/G) support
2085 */
2086
2087/*
2088 * setringparamam - change the tx/rx ring lengths
2089 */
2090#define MIN_RING_SIZE 3
2091#define MAX_RING_SIZE 1000
2092static int qfec_ethtool_setringparam(struct net_device *dev,
2093 struct ethtool_ringparam *ring)
2094{
2095 struct qfec_priv *priv = netdev_priv(dev);
2096 u32 timeout = 20;
2097
2098 /* notify stack the link is down */
2099 netif_carrier_off(dev);
2100
2101 /* allow tx to complete & free skbufs on the tx ring */
2102 do {
2103 usleep_range(10000, 100000);
2104 qfec_tx_replenish(dev);
2105
2106 if (timeout-- == 0) {
2107 QFEC_LOG_ERR("%s: timeout\n", __func__);
2108 return -ETIME;
2109 }
2110 } while (!qfec_ring_empty(&priv->ring_tbd));
2111
2112
2113 qfec_stop(dev);
2114
2115 /* set tx ring size */
2116 if (ring->tx_pending < MIN_RING_SIZE)
2117 ring->tx_pending = MIN_RING_SIZE;
2118 else if (ring->tx_pending > MAX_RING_SIZE)
2119 ring->tx_pending = MAX_RING_SIZE;
2120 priv->n_tbd = ring->tx_pending;
2121
2122 /* set rx ring size */
2123 if (ring->rx_pending < MIN_RING_SIZE)
2124 ring->rx_pending = MIN_RING_SIZE;
2125 else if (ring->rx_pending > MAX_RING_SIZE)
2126 ring->rx_pending = MAX_RING_SIZE;
2127 priv->n_rbd = ring->rx_pending;
2128
2129
2130 qfec_open(dev);
2131
2132 return 0;
2133}
2134
2135/*
2136 * getringparamam - returns local values
2137 */
2138static void qfec_ethtool_getringparam(struct net_device *dev,
2139 struct ethtool_ringparam *ring)
2140{
2141 struct qfec_priv *priv = netdev_priv(dev);
2142
2143 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2144
2145 ring->rx_max_pending = MAX_RING_SIZE;
2146 ring->rx_mini_max_pending = 0;
2147 ring->rx_jumbo_max_pending = 0;
2148 ring->tx_max_pending = MAX_RING_SIZE;
2149
2150 ring->rx_pending = priv->n_rbd;
2151 ring->rx_mini_pending = 0;
2152 ring->rx_jumbo_pending = 0;
2153 ring->tx_pending = priv->n_tbd;
2154}
2155
2156/*
2157 * speed, duplex, auto-neg settings
2158 */
2159static int
2160qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2161{
2162 struct qfec_priv *priv = netdev_priv(dev);
2163
2164 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2165
2166 cmd->maxrxpkt = priv->n_rbd;
2167 cmd->maxtxpkt = priv->n_tbd;
2168
2169 return mii_ethtool_gset(&priv->mii, cmd);
2170}
2171
2172static int
2173qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2174{
2175 struct qfec_priv *priv = netdev_priv(dev);
2176
2177 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2178
2179 return mii_ethtool_sset(&priv->mii, cmd);
2180}
2181
/*
 * msg/debug level
 */
/* report the current debug-logging bitmask (QFEC_LOG_* flags) */
static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
{
	return qfec_debug;
}
2189
/* XOR (not assign) the given bits into the debug mask, so passing the
 * same level twice toggles those QFEC_LOG_* flags on and off */
static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	qfec_debug ^= level;	/* toggle on/off */
}
2194
2195/*
2196 * register dump
2197 */
2198#define DMA_DMP_OFFSET 0x0000
2199#define DMA_REG_OFFSET 0x1000
2200#define DMA_REG_LEN 23
2201
2202#define MAC_DMP_OFFSET 0x0080
2203#define MAC_REG_OFFSET 0x0000
2204#define MAC_REG_LEN 55
2205
2206#define TS_DMP_OFFSET 0x0180
2207#define TS_REG_OFFSET 0x0700
2208#define TS_REG_LEN 15
2209
2210#define MDIO_DMP_OFFSET 0x0200
2211#define MDIO_REG_LEN 16
2212
2213#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
2214
/* size in bytes of the register dump produced by
 * qfec_ethtool_getregs(): DMA + MAC + timestamp blocks as 32-bit
 * words plus the 16-bit MDIO registers */
static int qfec_ethtool_getregs_len(struct net_device *dev)
{
	return REG_SIZE;
}
2219
2220static void
2221qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
2222 void *buf)
2223{
2224 struct qfec_priv *priv = netdev_priv(dev);
2225 u32 *data = buf;
2226 u16 *data16;
2227 unsigned int i;
2228 unsigned int j;
2229 unsigned int n;
2230
2231 memset(buf, 0, REG_SIZE);
2232
2233 j = DMA_DMP_OFFSET / sizeof(u32);
2234 for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
2235 data[j++] = htonl(qfec_reg_read(priv, i));
2236
2237 j = MAC_DMP_OFFSET / sizeof(u32);
2238 for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
2239 data[j++] = htonl(qfec_reg_read(priv, i));
2240
2241 j = TS_DMP_OFFSET / sizeof(u32);
2242 for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
2243 data[j++] = htonl(qfec_reg_read(priv, i));
2244
2245 data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
2246 for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
2247 data16[n++] = htons(qfec_mdio_read(dev, 0, i));
2248
2249 regs->len = REG_SIZE;
2250
2251 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
2252}
2253
2254/*
2255 * statistics
2256 * return counts of various ethernet activity.
2257 * many of these are same as in struct net_device_stats
2258 *
2259 * missed-frames indicates the number of attempts made by the ethernet
2260 * controller to write to a buffer-descriptor when the BD ownership
2261 * bit was not set. The rxfifooverflow counter (0x1D4) is not
2262 * available. The Missed Frame and Buffer Overflow Counter register
2263 * (0x1020) is used, but has only 16-bits and is reset when read.
2264 * It is read and updates the value in priv->stats.rx_missed_errors
2265 * in qfec_rx_int().
2266 */
/* labels for the hardware counters, index-matched to qfec_stats_regs
 * below (entry n labels register qfec_stats_regs[n]) */
static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
	"TX good/bad Bytes ",
	"TX Bytes ",
	"TX good/bad Frames ",
	"TX Bcast Frames ",
	"TX Mcast Frames ",
	"TX Unicast Frames ",
	"TX Pause Frames ",
	"TX Vlan Frames ",
	"TX Frames 64 ",
	"TX Frames 65-127 ",
	"TX Frames 128-255 ",
	"TX Frames 256-511 ",
	"TX Frames 512-1023 ",
	"TX Frames 1024+ ",
	/* "TX Pause Frames" appears twice, matching register 92 listed
	 * twice in qfec_stats_regs - presumably intentional; confirm */
	"TX Pause Frames ",
	"TX Collisions ",
	"TX Late Collisions ",
	"TX Excessive Collisions ",

	"RX good/bad Bytes ",
	"RX Bytes ",
	"RX good/bad Frames ",
	"RX Bcast Frames ",
	"RX Mcast Frames ",
	"RX Unicast Frames ",
	"RX Pause Frames ",
	"RX Vlan Frames ",
	"RX Frames 64 ",
	"RX Frames 65-127 ",
	"RX Frames 128-255 ",
	"RX Frames 256-511 ",
	"RX Frames 512-1023 ",
	"RX Frames 1024+ ",
	/* duplicated like the TX side - register 116 listed twice */
	"RX Pause Frames ",
	"RX Crc error Frames ",
	"RX Length error Frames ",
	"RX Alignment error Frames ",
	"RX Runt Frames ",
	"RX Oversize Frames ",
	"RX Missed Frames ",

};
2310
/* hardware statistics counters, given as 32-bit register word indices
 * (readers multiply by sizeof(uint32_t) to form the byte offset);
 * ordered to match qfec_stats_strings above - TX counters first, then
 * RX counters */
static u32 qfec_stats_regs[] = {

	 69,  89,  70,  71,  72,  90,  92,  93,
	 73,  74,  75,  76,  77,  78,  92,  84,
	 86,  87,

	 97,  98,  96,  99, 100, 113, 116, 118,
	107, 108, 109, 110, 111, 112, 116, 101,
	114, 102, 103, 106
};
2321
2322static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
2323 char *buf)
2324{
2325 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
2326 int count = PAGE_SIZE;
2327 int l = 0;
2328 int n;
2329
2330 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
2331
2332 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) {
2333 l += snprintf(&buf[l], count - l, " %12u %s\n",
2334 qfec_reg_read(priv,
2335 qfec_stats_regs[n] * sizeof(uint32_t)),
2336 qfec_stats_strings[n]);
2337 }
2338
2339 return l;
2340}
2341
2342static int qfec_get_sset_count(struct net_device *dev, int sset)
2343{
2344 switch (sset) {
2345 case ETH_SS_STATS:
2346 return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */
2347
2348 default:
2349 return -EOPNOTSUPP;
2350 }
2351}
2352
2353static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
2354 u8 *buf)
2355{
2356 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__,
2357 sizeof(qfec_stats_strings));
2358
2359 memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
2360}
2361
2362static void qfec_ethtool_getstats(struct net_device *dev,
2363 struct ethtool_stats *stats, uint64_t *data)
2364{
2365 struct qfec_priv *priv = netdev_priv(dev);
2366 int j = 0;
2367 int n;
2368
2369 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
2370 data[j++] = qfec_reg_read(priv,
2371 qfec_stats_regs[n] * sizeof(uint32_t));
2372
2373 data[j++] = priv->stats.rx_missed_errors;
2374
2375 stats->n_stats = j;
2376}
2377
2378static void qfec_ethtool_getdrvinfo(struct net_device *dev,
2379 struct ethtool_drvinfo *info)
2380{
2381 strlcpy(info->driver, QFEC_NAME, sizeof(info->driver));
2382 strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
2383 strlcpy(info->bus_info, dev_name(dev->dev.parent),
2384 sizeof(info->bus_info));
2385
2386 info->eedump_len = 0;
2387 info->regdump_len = qfec_ethtool_getregs_len(dev);
2388}
2389
/*
 * ethtool ops table
 *
 * Handed to the ethtool core via dev->ethtool_ops in qfec_probe();
 * every handler is defined earlier in this file.
 */
static const struct ethtool_ops qfec_ethtool_ops = {
	.nway_reset = qfec_nway_reset,

	/* link settings, identification and register dump */
	.get_settings = qfec_ethtool_getsettings,
	.set_settings = qfec_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = qfec_ethtool_getdrvinfo,
	.get_msglevel = qfec_ethtool_getmsglevel,
	.set_msglevel = qfec_ethtool_setmsglevel,
	.get_regs_len = qfec_ethtool_getregs_len,
	.get_regs = qfec_ethtool_getregs,

	/* descriptor ring sizing */
	.get_ringparam = qfec_ethtool_getringparam,
	.set_ringparam = qfec_ethtool_setringparam,

	/* flow-control (pause frame) configuration */
	.get_pauseparam = qfec_ethtool_getpauseparam,
	.set_pauseparam = qfec_ethtool_setpauseparam,

	/* statistics: counts/names/values must stay mutually consistent */
	.get_sset_count = qfec_get_sset_count,
	.get_strings = qfec_ethtool_getstrings,
	.get_ethtool_stats = qfec_ethtool_getstats,
};
2415
/*
 * create sysfs entries
 */
/* read-only diagnostic views of descriptor rings, config, clocks,
 * counters, registers, statistics and timestamp state
 */
static DEVICE_ATTR(bd_tx, 0444, qfec_bd_tx_show, NULL);
static DEVICE_ATTR(bd_rx, 0444, qfec_bd_rx_show, NULL);
static DEVICE_ATTR(cfg, 0444, qfec_config_show, NULL);
static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
/* NOTE(review): mode 0222 makes 'cmd' writable by ANY user; 0200
 * (owner-only write) is the usual choice for a control attribute --
 * confirm world-writability is intentional
 */
static DEVICE_ATTR(cmd, 0222, NULL, qfec_cmd);
static DEVICE_ATTR(cntrs, 0444, qfec_cntrs_show, NULL);
static DEVICE_ATTR(reg, 0444, qfec_reg_show, NULL);
static DEVICE_ATTR(stats, 0444, qfec_stats_show, NULL);
static DEVICE_ATTR(tstamp, 0444, qfec_tstamp_show, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002428
2429static void qfec_sysfs_create(struct net_device *dev)
2430{
2431 if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
2432 device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
2433 device_create_file(&(dev->dev), &dev_attr_cfg) ||
2434 device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002435 device_create_file(&(dev->dev), &dev_attr_cmd) ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002436 device_create_file(&(dev->dev), &dev_attr_cntrs) ||
2437 device_create_file(&(dev->dev), &dev_attr_reg) ||
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002438 device_create_file(&(dev->dev), &dev_attr_stats) ||
2439 device_create_file(&(dev->dev), &dev_attr_tstamp))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002440 pr_err("qfec_sysfs_create failed to create sysfs files\n");
2441}
2442
2443/*
2444 * map a specified resource
2445 */
2446static int qfec_map_resource(struct platform_device *plat, int resource,
2447 struct resource **priv_res,
2448 void **addr)
2449{
2450 struct resource *res;
2451
2452 QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
2453
2454 /* allocate region to access controller registers */
2455 *priv_res = res = platform_get_resource(plat, resource, 0);
2456 if (!res) {
2457 QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
2458 return -ENODEV;
2459 }
2460
2461 res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
2462 if (!res) {
2463 QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
2464 __func__, res->start, res->end - res->start);
2465 return -EBUSY;
2466 }
2467
2468 *addr = ioremap(res->start, res->end - res->start);
2469 if (!*addr)
2470 return -ENOMEM;
2471
2472 QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
2473 __func__, (void *)res->start, *addr);
2474
2475 return 0;
2476};
2477
2478/*
2479 * free allocated io regions
2480 */
2481static void qfec_free_res(struct resource *res, void *base)
2482{
2483
2484 if (res) {
2485 if (base)
2486 iounmap((void __iomem *)base);
2487
2488 release_mem_region(res->start, res->end - res->start);
2489 }
2490};
2491
2492/*
2493 * probe function that obtain configuration info and allocate net_device
2494 */
2495static int __devinit qfec_probe(struct platform_device *plat)
2496{
2497 struct net_device *dev;
2498 struct qfec_priv *priv;
2499 int ret = 0;
2500
2501 /* allocate device */
2502 dev = alloc_etherdev(sizeof(struct qfec_priv));
2503 if (!dev) {
2504 QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
2505 ret = -ENOMEM;
2506 goto err;
2507 }
2508
2509 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n", __func__, (int)dev);
2510
2511 qfec_dev = dev;
2512 SET_NETDEV_DEV(dev, &plat->dev);
2513
2514 dev->netdev_ops = &qfec_netdev_ops;
2515 dev->ethtool_ops = &qfec_ethtool_ops;
2516 dev->watchdog_timeo = 2 * HZ;
2517 dev->irq = platform_get_irq(plat, 0);
2518
2519 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2520
2521 /* initialize private data */
2522 priv = (struct qfec_priv *)netdev_priv(dev);
2523 memset((void *)priv, 0, sizeof(priv));
2524
2525 priv->net_dev = dev;
2526 platform_set_drvdata(plat, dev);
2527
2528 priv->n_tbd = TX_BD_NUM;
2529 priv->n_rbd = RX_BD_NUM;
2530
2531 /* initialize phy structure */
2532 priv->mii.phy_id_mask = 0x1F;
2533 priv->mii.reg_num_mask = 0x1F;
2534 priv->mii.dev = dev;
2535 priv->mii.mdio_read = qfec_mdio_read;
2536 priv->mii.mdio_write = qfec_mdio_write;
2537
2538 /* map register regions */
2539 ret = qfec_map_resource(
2540 plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
2541 if (ret) {
2542 QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
2543 goto err1;
2544 }
2545
2546 ret = qfec_map_resource(
2547 plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
2548 if (ret) {
2549 QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
2550 goto err2;
2551 }
2552
2553 ret = qfec_map_resource(
2554 plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
2555 if (ret) {
2556 QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
2557 goto err3;
2558 }
2559
2560 /* initialize MAC addr */
2561 ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
2562 MAC_ADDR_SIZE);
2563 if (ret)
2564 goto err4;
2565
2566 QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
2567 __func__,
2568 dev->dev_addr[0], dev->dev_addr[1],
2569 dev->dev_addr[2], dev->dev_addr[3],
2570 dev->dev_addr[4], dev->dev_addr[5]);
2571
2572 ret = register_netdev(dev);
2573 if (ret) {
2574 QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
2575 goto err4;
2576 }
2577
2578 spin_lock_init(&priv->mdio_lock);
2579 spin_lock_init(&priv->xmit_lock);
2580 qfec_sysfs_create(dev);
2581
2582 return 0;
2583
2584 /* error handling */
2585err4:
2586 qfec_free_res(priv->fuse_res, priv->fuse_base);
2587err3:
2588 qfec_free_res(priv->clk_res, priv->clk_base);
2589err2:
2590 qfec_free_res(priv->mac_res, priv->mac_base);
2591err1:
2592 free_netdev(dev);
2593err:
2594 QFEC_LOG_ERR("%s: err\n", __func__);
2595 return ret;
2596}
2597
2598/*
2599 * module remove
2600 */
2601static int __devexit qfec_remove(struct platform_device *plat)
2602{
2603 struct net_device *dev = platform_get_drvdata(plat);
2604 struct qfec_priv *priv = netdev_priv(dev);
2605
2606 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2607
2608 platform_set_drvdata(plat, NULL);
2609
2610 qfec_free_res(priv->fuse_res, priv->fuse_base);
2611 qfec_free_res(priv->clk_res, priv->clk_base);
2612 qfec_free_res(priv->mac_res, priv->mac_base);
2613
2614 unregister_netdev(dev);
2615 free_netdev(dev);
2616
2617 return 0;
2618}
2619
/*
 * module support
 * the FSM9xxx is not a mobile device and does not support power management
 * (hence no suspend/resume callbacks)
 */

static struct platform_driver qfec_driver = {
	.probe  = qfec_probe,
	.remove = __devexit_p(qfec_remove),
	.driver = {
		.name  = QFEC_NAME,
		.owner = THIS_MODULE,
	},
};
2633
2634/*
2635 * module init
2636 */
2637static int __init qfec_init_module(void)
2638{
2639 int res;
2640
2641 QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
2642
2643 res = platform_driver_register(&qfec_driver);
2644
2645 QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
2646 __func__, res);
2647
2648 return res;
2649}
2650
2651/*
2652 * module exit
2653 */
2654static void __exit qfec_exit_module(void)
2655{
2656 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2657
2658 platform_driver_unregister(&qfec_driver);
2659}
2660
/* module identification, reported by modinfo */
MODULE_DESCRIPTION("FSM Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
MODULE_VERSION("1.0");

/* entry/exit hooks for insmod/rmmod */
module_init(qfec_init_module);
module_exit(qfec_exit_module);