blob: d554fec448172849a3de6783cee3cecc6d4d5e57 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/io.h>
14
15#include <linux/platform_device.h>
16
17#include <linux/types.h> /* size_t */
18#include <linux/interrupt.h> /* mark_bh */
19
20#include <linux/netdevice.h> /* struct device, and other headers */
21#include <linux/etherdevice.h> /* eth_type_trans */
22#include <linux/skbuff.h>
23
24#include <linux/proc_fs.h>
25#include <linux/timer.h>
26#include <linux/mii.h>
27
28#include <linux/ethtool.h>
29#include <linux/net_tstamp.h>
30#include <linux/phy.h>
31#include <linux/inet.h>
32
33#include "qfec.h"
34
35#define QFEC_NAME "qfec"
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -070036#define QFEC_DRV_VER "July 14 2011"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
38#define ETH_BUF_SIZE 0x600
39#define MAX_N_BD 50
40#define MAC_ADDR_SIZE 6
41
42#define RX_TX_BD_RATIO 8
43#define RX_BD_NUM 32
44#define TX_BD_NUM (RX_BD_NUM * RX_TX_BD_RATIO)
45#define TX_BD_TI_RATIO 4
46
47/*
48 * logging macros
49 */
50#define QFEC_LOG_PR 1
51#define QFEC_LOG_DBG 2
52#define QFEC_LOG_DBG2 4
53#define QFEC_LOG_MDIO_W 8
54#define QFEC_LOG_MDIO_R 16
55
56static int qfec_debug = QFEC_LOG_PR;
57
58#ifdef QFEC_DEBUG
59# define QFEC_LOG(flag, ...) \
60 do { \
61 if (flag & qfec_debug) \
62 pr_info(__VA_ARGS__); \
63 } while (0)
64#else
65# define QFEC_LOG(flag, ...)
66#endif
67
68#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
69
70/*
71 * driver buffer-descriptor
72 * contains the 4 word HW descriptor plus an additional 4-words.
73 * (See the DSL bits in the BUS-Mode register).
74 */
75#define BD_FLAG_LAST_BD 1
76
77struct buf_desc {
78 struct qfec_buf_desc *p_desc;
79 struct sk_buff *skb;
80 void *buf_virt_addr;
81 void *buf_phys_addr;
82 uint32_t last_bd_flag;
83};
84
85/*
86 *inline functions accessing non-struct qfec_buf_desc elements
87 */
88
89/* skb */
90static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
91{
92 return p_bd->skb;
93};
94
95static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
96{
97 p_bd->skb = p;
98};
99
100/* virtual addr */
101static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
102{
103 p_bd->buf_virt_addr = addr;
104};
105
106static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
107{
108 return p_bd->buf_virt_addr;
109};
110
111/* physical addr */
112static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
113{
114 p_bd->buf_phys_addr = addr;
115};
116
117static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
118{
119 return p_bd->buf_phys_addr;
120};
121
122/* last_bd_flag */
123static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
124{
125 return (p_bd->last_bd_flag != 0);
126};
127
128static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
129{
130 p_bd->last_bd_flag = BD_FLAG_LAST_BD;
131};
132
133/*
134 *inline functions accessing struct qfec_buf_desc elements
135 */
136
137/* ownership bit */
138static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
139{
140 return p_bd->p_desc->status & BUF_OWN;
141};
142
143static inline void qfec_bd_own_set(struct buf_desc *p_bd)
144{
145 p_bd->p_desc->status |= BUF_OWN ;
146};
147
148static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
149{
150 p_bd->p_desc->status &= ~(BUF_OWN);
151};
152
153static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
154{
155 return p_bd->p_desc->status;
156};
157
158static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
159{
160 p_bd->p_desc->status = status;
161};
162
163static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
164{
165 return BUF_RX_FL_GET((*p_bd->p_desc));
166};
167
168/* control register */
169static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
170{
171 p_bd->p_desc->ctl = 0;
172};
173
174static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
175{
176 return p_bd->p_desc->ctl;
177};
178
179static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
180{
181 p_bd->p_desc->ctl |= val;
182};
183
184static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
185{
186 p_bd->p_desc->ctl = val;
187};
188
189/* pbuf register */
190static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
191{
192 return p_bd->p_desc->p_buf;
193}
194
195static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
196{
197 p_bd->p_desc->p_buf = p;
198}
199
200/* next register */
201static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
202{
203 return p_bd->p_desc->next;
204};
205
206/*
207 * initialize an RX BD w/ a new buf
208 */
209static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
210{
211 struct sk_buff *skb;
212 void *p;
213 void *v;
214
215 /* allocate and record ptrs for sk buff */
216 skb = dev_alloc_skb(ETH_BUF_SIZE);
217 if (!skb)
218 goto err;
219
220 qfec_bd_skbuf_set(p_bd, skb);
221
222 v = skb_put(skb, ETH_BUF_SIZE);
223 qfec_bd_virt_set(p_bd, v);
224
225 p = (void *) dma_map_single(&dev->dev,
226 (void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
227 qfec_bd_pbuf_set(p_bd, p);
228 qfec_bd_phys_set(p_bd, p);
229
230 /* populate control register */
231 /* mark the last BD and set end-of-ring bit */
232 qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
233 (qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));
234
235 qfec_bd_status_set(p_bd, BUF_OWN);
236
237 if (!(qfec_debug & QFEC_LOG_DBG2))
238 return 0;
239
240 /* debug messages */
241 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);
242
243 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);
244
245 QFEC_LOG(QFEC_LOG_DBG2,
246 "%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
247 __func__, (void *)p_bd,
248 (void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
249 (void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
250 (void *)p);
251
252 return 0;
253
254err:
255 return -ENOMEM;
256};
257
258/*
259 * ring structure used to maintain indices of buffer-descriptor (BD) usage
260 *
261 * The RX BDs are normally all pre-allocated with buffers available to be
262 * DMA'd into with received frames. The head indicates the first BD/buffer
263 * containing a received frame, and the tail indicates the oldest BD/buffer
264 * that needs to be restored for use. Head and tail are both initialized
265 * to zero, and n_free is initialized to zero, since all BD are initialized.
266 *
267 * The TX BDs are normally available for use, only being initialized as
268 * TX frames are requested for transmission. The head indicates the
269 * first available BD, and the tail indicate the oldest BD that has
270 * not been acknowledged as transmitted. Head and tail are both initialized
271 * to zero, and n_free is initialized to len, since all are available for use.
272 */
struct ring {
	int head;	/* index of next BD to use/process */
	int tail;	/* index of oldest outstanding BD */
	int n_free;	/* BDs currently available */
	int len;	/* total BDs in the ring */
};
279
280/* accessory in line functions for struct ring */
281static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
282{
283 p_ring->head = p_ring->tail = 0;
284 p_ring->len = size;
285 p_ring->n_free = free;
286}
287
288static inline int qfec_ring_full(struct ring *p_ring)
289{
290 return (p_ring->n_free == 0);
291};
292
293static inline int qfec_ring_empty(struct ring *p_ring)
294{
295 return (p_ring->n_free == p_ring->len);
296}
297
298static inline void qfec_ring_head_adv(struct ring *p_ring)
299{
300 p_ring->head = ++p_ring->head % p_ring->len;
301 p_ring->n_free--;
302};
303
304static inline void qfec_ring_tail_adv(struct ring *p_ring)
305{
306 p_ring->tail = ++p_ring->tail % p_ring->len;
307 p_ring->n_free++;
308};
309
310static inline int qfec_ring_head(struct ring *p_ring)
311{
312
313 return p_ring->head;
314};
315
316static inline int qfec_ring_tail(struct ring *p_ring)
317{
318 return p_ring->tail;
319};
320
321static inline int qfec_ring_room(struct ring *p_ring)
322{
323 return p_ring->n_free;
324};
325
326/*
327 * counters track normal and abnormal driver events and activity
328 */
/*
 * counters track normal and abnormal driver events and activity.
 * Values index both qfec_priv.cntr[] and cntr_name[], and the list is
 * split in two at the "half" marker for two-column display -- keep the
 * ordering in sync with cntr_name[].
 */
enum cntr {
	isr = 0,
	fatal_bus,

	early_tx,
	tx_no_resource,
	tx_proc_stopped,
	tx_jabber_tmout,

	xmit,
	tx_int,
	tx_isr,
	tx_owned,
	tx_underflow,

	tx_replenish,
	tx_skb_null,
	tx_timeout,
	tx_too_large,

	gmac_isr,

	/* half */
	norm_int,
	abnorm_int,

	early_rx,
	rx_buf_unavail,
	rx_proc_stopped,
	rx_watchdog,

	netif_rx_cntr,
	rx_int,
	rx_isr,
	rx_owned,
	rx_overflow,

	rx_dropped,
	rx_skb_null,
	queue_start,
	queue_stop,

	rx_paddr_nok,
	ts_ioctl,
	ts_tx_en,
	ts_tx_rtn,

	ts_rec,
	cntr_last,	/* sentinel: number of counters */
};
379
/*
 * printable names for enum cntr entries; must stay in the exact order
 * of enum cntr ("netif_rx" pairs with netif_rx_cntr, the trailing ""
 * pairs with cntr_last)
 */
static char *cntr_name[] = {
	"isr",
	"fatal_bus",

	"early_tx",
	"tx_no_resource",
	"tx_proc_stopped",
	"tx_jabber_tmout",

	"xmit",
	"tx_int",
	"tx_isr",
	"tx_owned",
	"tx_underflow",

	"tx_replenish",
	"tx_skb_null",
	"tx_timeout",
	"tx_too_large",

	"gmac_isr",

	/* half */
	"norm_int",
	"abnorm_int",

	"early_rx",
	"rx_buf_unavail",
	"rx_proc_stopped",
	"rx_watchdog",

	"netif_rx",
	"rx_int",
	"rx_isr",
	"rx_owned",
	"rx_overflow",

	"rx_dropped",
	"rx_skb_null",
	"queue_start",
	"queue_stop",

	"rx_paddr_nok",
	"ts_ioctl",
	"ts_tx_en",
	"ts_tx_rtn",

	"ts_rec",
	""
};
430
/*
 * private data
 */

/* the single network device instance managed by this driver */
static struct net_device *qfec_dev;

/* driver state bits kept in qfec_priv.state */
enum qfec_state {
	timestamping = 0x04,
};
440
441struct qfec_priv {
442 struct net_device *net_dev;
443 struct net_device_stats stats; /* req statistics */
444
445 struct device dev;
446
447 spinlock_t xmit_lock;
448 spinlock_t mdio_lock;
449
450 unsigned int state; /* driver state */
451
452 unsigned int bd_size; /* buf-desc alloc size */
453 struct qfec_buf_desc *bd_base; /* * qfec-buf-desc */
454 dma_addr_t tbd_dma; /* dma/phy-addr buf-desc */
455 dma_addr_t rbd_dma; /* dma/phy-addr buf-desc */
456
457 struct resource *mac_res;
458 void *mac_base; /* mac (virt) base address */
459
460 struct resource *clk_res;
461 void *clk_base; /* clk (virt) base address */
462
463 struct resource *fuse_res;
464 void *fuse_base; /* mac addr fuses */
465
466 unsigned int n_tbd; /* # of TX buf-desc */
467 struct ring ring_tbd; /* TX ring */
468 struct buf_desc *p_tbd;
469 unsigned int tx_ic_mod; /* (%) val for setting IC */
470
471 unsigned int n_rbd; /* # of RX buf-desc */
472 struct ring ring_rbd; /* RX ring */
473 struct buf_desc *p_rbd;
474
475 struct buf_desc *p_latest_rbd;
476 struct buf_desc *p_ending_rbd;
477
478 unsigned long cntr[cntr_last]; /* activity counters */
479
480 struct mii_if_info mii; /* used by mii lib */
481
482 int mdio_clk; /* phy mdio clock rate */
483 int phy_id; /* default PHY addr (0) */
484 struct timer_list phy_tmr; /* monitor PHY state */
485};
486
487/*
488 * cntrs display
489 */
490
491static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
492 char *buf)
493{
494 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
495 int h = (cntr_last + 1) / 2;
496 int l;
497 int n;
498 int count = PAGE_SIZE;
499
500 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
501
502 l = snprintf(&buf[0], count, "%s:\n", __func__);
503 for (n = 0; n < h; n++) {
504 l += snprintf(&buf[l], count - l,
505 " %12lu %-16s %12lu %s\n",
506 priv->cntr[n], cntr_name[n],
507 priv->cntr[n+h], cntr_name[n+h]);
508 }
509
510 return l;
511}
512
513# define CNTR_INC(priv, name) (priv->cntr[name]++)
514
515/*
516 * functions that manage state
517 */
518static inline void qfec_queue_start(struct net_device *dev)
519{
520 struct qfec_priv *priv = netdev_priv(dev);
521
522 if (netif_queue_stopped(dev)) {
523 netif_wake_queue(dev);
524 CNTR_INC(priv, queue_start);
525 }
526};
527
528static inline void qfec_queue_stop(struct net_device *dev)
529{
530 struct qfec_priv *priv = netdev_priv(dev);
531
532 netif_stop_queue(dev);
533 CNTR_INC(priv, queue_stop);
534};
535
536/*
537 * functions to access and initialize the MAC registers
538 */
539static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
540{
541 return ioread32((void *) (priv->mac_base + reg));
542}
543
544static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
545{
546 uint32_t addr = (uint32_t)priv->mac_base + reg;
547
548 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
549 iowrite32(val, (void *)addr);
550}
551
552/*
553 * speed/duplex/pause settings
554 */
555static int qfec_config_show(struct device *dev, struct device_attribute *attr,
556 char *buf)
557{
558 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
559 int cfg = qfec_reg_read(priv, MAC_CONFIG_REG);
560 int flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
561 int l = 0;
562 int count = PAGE_SIZE;
563
564 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
565
566 l += snprintf(&buf[l], count, "%s:", __func__);
567
568 l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg,
569 (cfg & MAC_CONFIG_REG_PS)
570 ? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
571 cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
572 cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");
573
574 flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
575 l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow,
576 (flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
577 : ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
578 : ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));
579
580 l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
581 l += snprintf(&buf[l], count - l, "\n");
582 return l;
583}
584
585
586/*
587 * table and functions to initialize controller registers
588 */
589
/* one controller register: init value plus display metadata */
struct reg_entry {
	unsigned int rdonly;	/* non-zero: never written by reg_init */
	unsigned int addr;	/* byte offset from mac_base */
	char *label;		/* name for sysfs display */
	unsigned int val;	/* initialization value */
};
596
597static struct reg_entry qfec_reg_tbl[] = {
598 { 0, BUS_MODE_REG, "BUS_MODE_REG", BUS_MODE_REG_DEFAULT },
599 { 0, AXI_BUS_MODE_REG, "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
600 { 0, AXI_STATUS_REG, "AXI_STATUS_REG", 0 },
601
602 { 0, MAC_ADR_0_HIGH_REG, "MAC_ADR_0_HIGH_REG", 0x00000302 },
603 { 0, MAC_ADR_0_LOW_REG, "MAC_ADR_0_LOW_REG", 0x01350702 },
604
605 { 1, RX_DES_LST_ADR_REG, "RX_DES_LST_ADR_REG", 0 },
606 { 1, TX_DES_LST_ADR_REG, "TX_DES_LST_ADR_REG", 0 },
607 { 1, STATUS_REG, "STATUS_REG", 0 },
608 { 1, DEBUG_REG, "DEBUG_REG", 0 },
609
610 { 0, INTRP_EN_REG, "INTRP_EN_REG", QFEC_INTRP_SETUP},
611
612 { 1, CUR_HOST_TX_DES_REG, "CUR_HOST_TX_DES_REG", 0 },
613 { 1, CUR_HOST_RX_DES_REG, "CUR_HOST_RX_DES_REG", 0 },
614 { 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
615 { 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },
616
617 { 1, MAC_FR_FILTER_REG, "MAC_FR_FILTER_REG", 0 },
618
619 { 0, MAC_CONFIG_REG, "MAC_CONFIG_REG", MAC_CONFIG_REG_SPD_1G
620 | MAC_CONFIG_REG_DM
621 | MAC_CONFIG_REG_TE
622 | MAC_CONFIG_REG_RE
623 | MAC_CONFIG_REG_IPC },
624
625 { 1, INTRP_STATUS_REG, "INTRP_STATUS_REG", 0 },
626 { 1, INTRP_MASK_REG, "INTRP_MASK_REG", 0 },
627
628 { 0, OPER_MODE_REG, "OPER_MODE_REG", OPER_MODE_REG_DEFAULT },
629
630 { 1, GMII_ADR_REG, "GMII_ADR_REG", 0 },
631 { 1, GMII_DATA_REG, "GMII_DATA_REG", 0 },
632
633 { 0, MMC_INTR_MASK_RX_REG, "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF },
634 { 0, MMC_INTR_MASK_TX_REG, "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF },
635
636 { 1, TS_HIGH_REG, "TS_HIGH_REG", 0 },
637 { 1, TS_LOW_REG, "TS_LOW_REG", 0 },
638
639 { 1, TS_HI_UPDT_REG, "TS_HI_UPDATE_REG", 0 },
640 { 1, TS_LO_UPDT_REG, "TS_LO_UPDATE_REG", 0 },
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -0700641 { 0, TS_SUB_SEC_INCR_REG, "TS_SUB_SEC_INCR_REG", 1 },
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700642 { 0, TS_CTL_REG, "TS_CTL_REG", TS_CTL_TSENALL
643 | TS_CTL_TSCTRLSSR
644 | TS_CTL_TSINIT
645 | TS_CTL_TSENA },
646};
647
648static void qfec_reg_init(struct qfec_priv *priv)
649{
650 struct reg_entry *p = qfec_reg_tbl;
651 int n = ARRAY_SIZE(qfec_reg_tbl);
652
653 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
654
655 for (; n--; p++) {
656 if (!p->rdonly)
657 qfec_reg_write(priv, p->addr, p->val);
658 }
659}
660
661/*
662 * display registers thru sysfs
663 */
664static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
665 char *buf)
666{
667 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
668 struct reg_entry *p = qfec_reg_tbl;
669 int n = ARRAY_SIZE(qfec_reg_tbl);
670 int l = 0;
671 int count = PAGE_SIZE;
672
673 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
674
675 for (; n--; p++) {
676 l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n",
677 (void *)priv->mac_base + p->addr, p->addr,
678 qfec_reg_read(priv, p->addr), p->label);
679 }
680
681 return l;
682}
683
684/*
685 * set the MAC-0 address
686 */
687static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
688{
689 uint32_t h = 0;
690 uint32_t l = 0;
691
692 h = h << 8 | addr[5];
693 h = h << 8 | addr[4];
694
695 l = l << 8 | addr[3];
696 l = l << 8 | addr[2];
697 l = l << 8 | addr[1];
698 l = l << 8 | addr[0];
699
700 qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
701 qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l);
702
703 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
704}
705
706/*
Rohit Vaswani0565a2d2011-09-15 12:53:07 -0700707 * set up the RX filter
708 */
709static void qfec_set_rx_mode(struct net_device *dev)
710{
711 struct qfec_priv *priv = netdev_priv(dev);
712 uint32_t filter_conf;
713 int index;
714
715 /* Clear address filter entries */
716 for (index = 1; index < MAC_ADR_MAX; ++index) {
717 qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), 0);
718 qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), 0);
719 }
720
721 if (dev->flags & IFF_PROMISC) {
722 /* Receive all frames */
723 filter_conf = MAC_FR_FILTER_RA;
724 } else if ((dev->flags & IFF_MULTICAST) == 0) {
725 /* Unicast filtering only */
726 filter_conf = MAC_FR_FILTER_HPF;
727 } else if ((netdev_mc_count(dev) > MAC_ADR_MAX - 1) ||
728 (dev->flags & IFF_ALLMULTI)) {
729 /* Unicast filtering is enabled, Pass all multicast frames */
730 filter_conf = MAC_FR_FILTER_HPF | MAC_FR_FILTER_PM;
731 } else {
732 struct netdev_hw_addr *ha;
733
734 /* Both unicast and multicast filtering are enabled */
735 filter_conf = MAC_FR_FILTER_HPF;
736
737 index = 1;
738
739 netdev_for_each_mc_addr(ha, dev) {
740 uint32_t high, low;
741
742 high = (1 << 31) | (ha->addr[5] << 8) | (ha->addr[4]);
743 low = (ha->addr[3] << 24) | (ha->addr[2] << 16) |
744 (ha->addr[1] << 8) | (ha->addr[0]);
745
746 qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), high);
747 qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), low);
748
749 index++;
750 }
751 }
752
753 qfec_reg_write(priv, MAC_FR_FILTER_REG, filter_conf);
754}
755
756/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700757 * reset the controller
758 */
759
760#define QFEC_RESET_TIMEOUT 10000
761 /* reset should always clear but did not w/o test/delay
762 * in RgMii mode. there is no spec'd max timeout
763 */
764
765static int qfec_hw_reset(struct qfec_priv *priv)
766{
767 int timeout = QFEC_RESET_TIMEOUT;
768
769 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
770
771 qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);
772
773 while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
774 if (timeout-- == 0) {
775 QFEC_LOG_ERR("%s: timeout\n", __func__);
776 return -ETIME;
777 }
778
779 /* there were problems resetting the controller
780 * in RGMII mode when there wasn't sufficient
781 * delay between register reads
782 */
783 usleep_range(100, 200);
784 }
785
786 return 0;
787}
788
789/*
790 * initialize controller
791 */
792static int qfec_hw_init(struct qfec_priv *priv)
793{
794 int res = 0;
795
796 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
797
798 res = qfec_hw_reset(priv);
799 if (res)
800 return res;
801
802 qfec_reg_init(priv);
803
804 /* config buf-desc locations */
805 qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
806 qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);
807
808 /* clear interrupts */
809 qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
810 | INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);
811
812 return res;
813}
814
815/*
816 * en/disable controller
817 */
818static void qfec_hw_enable(struct qfec_priv *priv)
819{
820 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
821
822 qfec_reg_write(priv, OPER_MODE_REG,
823 qfec_reg_read(priv, OPER_MODE_REG)
824 | OPER_MODE_REG_ST | OPER_MODE_REG_SR);
825}
826
827static void qfec_hw_disable(struct qfec_priv *priv)
828{
829 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
830
831 qfec_reg_write(priv, OPER_MODE_REG,
832 qfec_reg_read(priv, OPER_MODE_REG)
833 & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
834}
835
836/*
837 * interface selection
838 */
839struct intf_config {
840 uint32_t intf_sel;
841 uint32_t emac_ns;
842 uint32_t eth_x_en_ns;
843 uint32_t clkmux_sel;
844};
845
846#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
847#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)
848
849static struct intf_config intf_config_tbl[] = {
850 { EMAC_PHY_INTF_SEL_MII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
851 { EMAC_PHY_INTF_SEL_RGMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
852 { EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
853 CLKMUX_REVMII }
854};
855
856/*
857 * emac clk register read and write functions
858 */
859static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
860{
861 return ioread32((void *) (priv->clk_base + reg));
862}
863
864static inline void qfec_clkreg_write(struct qfec_priv *priv,
865 uint32_t reg, uint32_t val)
866{
867 uint32_t addr = (uint32_t)priv->clk_base + reg;
868
869 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
870 iowrite32(val, (void *)addr);
871}
872
873/*
874 * configure the PHY interface and clock routing and signal bits
875 */
876enum phy_intfc {
877 intfc_mii = 0,
878 intfc_rgmii = 1,
879 intfc_revmii = 2,
880};
881
882static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
883{
884 struct intf_config *p;
885
886 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);
887
888 if (intfc > intfc_revmii) {
889 QFEC_LOG_ERR("%s: range\n", __func__);
890 return -ENXIO;
891 }
892
893 p = &intf_config_tbl[intfc];
894
895 qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
896 qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns);
897 qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns);
898 qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel);
899
900 return 0;
901}
902
903/*
904 * display registers thru proc-fs
905 */
906static struct qfec_clk_reg {
907 uint32_t offset;
908 char *label;
909} qfec_clk_regs[] = {
910 { ETH_MD_REG, "ETH_MD_REG" },
911 { ETH_NS_REG, "ETH_NS_REG" },
912 { ETH_X_EN_NS_REG, "ETH_X_EN_NS_REG" },
913 { EMAC_PTP_MD_REG, "EMAC_PTP_MD_REG" },
914 { EMAC_PTP_NS_REG, "EMAC_PTP_NS_REG" },
915 { EMAC_NS_REG, "EMAC_NS_REG" },
916 { EMAC_TX_FS_REG, "EMAC_TX_FS_REG" },
917 { EMAC_RX_FS_REG, "EMAC_RX_FS_REG" },
918 { EMAC_PHY_INTF_SEL_REG, "EMAC_PHY_INTF_SEL_REG" },
919 { EMAC_PHY_ADDR_REG, "EMAC_PHY_ADDR_REG" },
920 { EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" },
921 { EMAC_CLKMUX_SEL_REG, "EMAC_CLKMUX_SEL_REG" },
922};
923
924static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
925 char *buf)
926{
927 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
928 struct qfec_clk_reg *p = qfec_clk_regs;
929 int n = ARRAY_SIZE(qfec_clk_regs);
930 int l = 0;
931 int count = PAGE_SIZE;
932
933 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
934
935 for (; n--; p++) {
936 l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n",
937 (void *)priv->clk_base + p->offset, p->offset,
938 qfec_clkreg_read(priv, p->offset), p->label);
939 }
940
941 return l;
942}
943
944/*
945 * speed selection
946 */
947
948struct qfec_pll_cfg {
949 uint32_t spd;
950 uint32_t eth_md; /* M [31:16], NOT 2*D [15:0] */
951 uint32_t eth_ns; /* NOT(M-N) [31:16], ctl bits [11:0] */
952};
953
954static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
955 /* 2.5 MHz */
956 { MAC_CONFIG_REG_SPD_10, ETH_MD_M(1) | ETH_MD_2D_N(100),
957 ETH_NS_NM(100-1)
958 | ETH_NS_MCNTR_EN
959 | ETH_NS_MCNTR_MODE_DUAL
960 | ETH_NS_PRE_DIV(0)
961 | CLK_SRC_PLL_EMAC },
962 /* 25 MHz */
963 { MAC_CONFIG_REG_SPD_100, ETH_MD_M(1) | ETH_MD_2D_N(10),
964 ETH_NS_NM(10-1)
965 | ETH_NS_MCNTR_EN
966 | ETH_NS_MCNTR_MODE_DUAL
967 | ETH_NS_PRE_DIV(0)
968 | CLK_SRC_PLL_EMAC },
969 /* 125 MHz */
970 {MAC_CONFIG_REG_SPD_1G, 0, ETH_NS_PRE_DIV(1)
971 | CLK_SRC_PLL_EMAC },
972};
973
974enum speed {
975 spd_10 = 0,
976 spd_100 = 1,
977 spd_1000 = 2,
978};
979
980/*
981 * configure the PHY interface and clock routing and signal bits
982 */
983static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
984 unsigned int dplx)
985{
986 struct qfec_priv *priv = netdev_priv(dev);
987 struct qfec_pll_cfg *p;
988
989 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);
990
991 if (spd > spd_1000) {
992 QFEC_LOG_ERR("%s: range\n", __func__);
993 return -ENODEV;
994 }
995
996 p = &qfec_pll_cfg_tbl[spd];
997
998 /* set the MAC speed bits */
999 qfec_reg_write(priv, MAC_CONFIG_REG,
1000 (qfec_reg_read(priv, MAC_CONFIG_REG)
1001 & ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
1002 | p->spd | (dplx ? MAC_CONFIG_REG_DM : 0));
1003
1004 qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
1005 qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);
1006
1007 return 0;
1008}
1009
/*
 * configure the PTP clock source (19.2 MHz tcxo -- see qfec_pll_ptp)
 */
1013
1014static struct qfec_pll_cfg qfec_pll_ptp = {
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001015 /* 19.2 MHz tcxo */
1016 0, 0, ETH_NS_PRE_DIV(0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001017 | EMAC_PTP_NS_ROOT_EN
1018 | EMAC_PTP_NS_CLK_EN
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001019 | CLK_SRC_TCXO
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001020};
1021
1022#define PLLTEST_PAD_CFG 0x01E0
1023#define PLLTEST_PLL_7 0x3700
1024
1025#define CLKTEST_REG 0x01EC
1026#define CLKTEST_EMAC_RX 0x3fc07f7a
1027
1028static int qfec_ptp_cfg(struct qfec_priv *priv)
1029{
1030 struct qfec_pll_cfg *p = &qfec_pll_ptp;
1031
1032 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
1033 __func__, p->eth_md, p->eth_ns);
1034
1035 qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
1036 qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);
1037
1038 /* configure HS/LS clk test ports to verify clks */
1039 qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX);
1040 qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);
1041
1042 return 0;
1043}
1044
1045/*
1046 * MDIO operations
1047 */
1048
1049/*
1050 * wait reasonable amount of time for MDIO operation to complete, not busy
1051 */
1052static int qfec_mdio_busy(struct net_device *dev)
1053{
1054 int i;
1055
1056 for (i = 100; i > 0; i--) {
1057 if (!(qfec_reg_read(
1058 netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) {
1059 return 0;
1060 }
1061 udelay(1);
1062 }
1063
1064 return -ETIME;
1065}
1066
1067/*
1068 * initiate either a read or write MDIO operation
1069 */
1070
1071static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
1072{
1073 struct qfec_priv *priv = netdev_priv(dev);
1074 int res = 0;
1075
1076 /* insure phy not busy */
1077 res = qfec_mdio_busy(dev);
1078 if (res) {
1079 QFEC_LOG_ERR("%s: busy\n", __func__);
1080 goto done;
1081 }
1082
1083 /* initiate operation */
1084 qfec_reg_write(priv, GMII_ADR_REG,
1085 GMII_ADR_REG_ADR_SET(phy_id)
1086 | GMII_ADR_REG_REG_SET(reg)
1087 | GMII_ADR_REG_CSR_SET(priv->mdio_clk)
1088 | (wr ? GMII_ADR_REG_GW : 0)
1089 | GMII_ADR_REG_GB);
1090
1091 /* wait for operation to complete */
1092 res = qfec_mdio_busy(dev);
1093 if (res)
1094 QFEC_LOG_ERR("%s: timeout\n", __func__);
1095
1096done:
1097 return res;
1098}
1099
1100/*
1101 * read MDIO register
1102 */
1103static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
1104{
1105 struct qfec_priv *priv = netdev_priv(dev);
1106 int res = 0;
1107 unsigned long flags;
1108
1109 spin_lock_irqsave(&priv->mdio_lock, flags);
1110
1111 res = qfec_mdio_oper(dev, phy_id, reg, 0);
1112 if (res) {
1113 QFEC_LOG_ERR("%s: oper\n", __func__);
1114 goto done;
1115 }
1116
1117 res = qfec_reg_read(priv, GMII_DATA_REG);
1118 QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
1119 __func__, reg, res);
1120
1121done:
1122 spin_unlock_irqrestore(&priv->mdio_lock, flags);
1123 return res;
1124}
1125
1126/*
1127 * write MDIO register
1128 */
1129static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
1130 int val)
1131{
1132 struct qfec_priv *priv = netdev_priv(dev);
1133 unsigned long flags;
1134
1135 spin_lock_irqsave(&priv->mdio_lock, flags);
1136
1137 QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
1138 __func__, reg, val);
1139
1140 qfec_reg_write(priv, GMII_DATA_REG, val);
1141
1142 if (qfec_mdio_oper(dev, phy_id, reg, 1))
1143 QFEC_LOG_ERR("%s: oper\n", __func__);
1144
1145 spin_unlock_irqrestore(&priv->mdio_lock, flags);
1146}
1147
1148/*
1149 * get auto-negotiation results
1150 */
1151
/*
 * link-partner ability masks for 100/10 Mbps and their full-duplex
 * subsets.  Fix: the original QFEC_100 OR'd LPA_100HALF twice; the
 * duplicate was clearly meant to be LPA_100BASE4, matching QFEC_100_FD
 * which already tests that bit.
 */
#define QFEC_100    (LPA_100HALF | LPA_100FULL | LPA_100BASE4)
#define QFEC_100_FD (LPA_100FULL | LPA_100BASE4)
#define QFEC_10     (LPA_10HALF | LPA_10FULL)
#define QFEC_10_FD   LPA_10FULL
1156
1157static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
1158{
1159 struct qfec_priv *priv = netdev_priv(dev);
1160 uint32_t status;
1161 uint32_t advert;
1162 uint32_t lpa;
1163 uint32_t flow;
1164
1165 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
1166 lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
1167 status = advert & lpa;
1168
1169 /* todo: check extended status register for 1G abilities */
1170
1171 if (status & QFEC_100) {
1172 *spd = spd_100;
1173 *dplx = status & QFEC_100_FD ? 1 : 0;
1174 }
1175
1176 else if (status & QFEC_10) {
1177 *spd = spd_10;
1178 *dplx = status & QFEC_10_FD ? 1 : 0;
1179 }
1180
1181 /* check pause */
1182 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
1183 flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);
1184
1185 if (status & ADVERTISE_PAUSE_CAP) {
1186 flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
1187 } else if (status & ADVERTISE_PAUSE_ASYM) {
1188 if (lpa & ADVERTISE_PAUSE_CAP)
1189 flow |= FLOW_CONTROL_TFE;
1190 else if (advert & ADVERTISE_PAUSE_CAP)
1191 flow |= FLOW_CONTROL_RFE;
1192 }
1193
1194 qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
1195}
1196
1197/*
1198 * monitor phy status, and process auto-neg results when changed
1199 */
1200
1201static void qfec_phy_monitor(unsigned long data)
1202{
1203 struct net_device *dev = (struct net_device *) data;
1204 struct qfec_priv *priv = netdev_priv(dev);
1205 unsigned int spd = 0;
1206 unsigned int dplx = 1;
1207
1208 mod_timer(&priv->phy_tmr, jiffies + HZ);
1209
1210 if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) {
1211 qfec_get_an(dev, &spd, &dplx);
1212 qfec_speed_cfg(dev, spd, dplx);
1213 QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
1214 __func__, spd, dplx);
1215
1216 netif_carrier_on(dev);
1217 }
1218
1219 else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) {
1220 QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
1221 netif_carrier_off(dev);
1222 }
1223}
1224
1225/*
1226 * dealloc buffer descriptor memory
1227 */
1228
1229static void qfec_mem_dealloc(struct net_device *dev)
1230{
1231 struct qfec_priv *priv = netdev_priv(dev);
1232
1233 dma_free_coherent(&dev->dev,
1234 priv->bd_size, priv->bd_base, priv->tbd_dma);
1235 priv->bd_base = 0;
1236}
1237
1238/*
1239 * allocate shared device memory for TX/RX buf-desc (and buffers)
1240 */
1241
1242static int qfec_mem_alloc(struct net_device *dev)
1243{
1244 struct qfec_priv *priv = netdev_priv(dev);
1245
1246 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1247
1248 priv->bd_size =
1249 (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
1250
1251 priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
1252 if (!priv->p_tbd) {
1253 QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
1254 return -ENOMEM;
1255 }
1256
1257 priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
1258 if (!priv->p_rbd) {
1259 QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
1260 return -ENOMEM;
1261 }
1262
1263 /* alloc mem for buf-desc, if not already alloc'd */
1264 if (!priv->bd_base) {
1265 priv->bd_base = dma_alloc_coherent(&dev->dev,
1266 priv->bd_size, &priv->tbd_dma,
1267 GFP_KERNEL | __GFP_DMA);
1268 }
1269
1270 if (!priv->bd_base) {
1271 QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
1272 return -ENOMEM;
1273 }
1274
1275 priv->rbd_dma = priv->tbd_dma
1276 + (priv->n_tbd * sizeof(struct qfec_buf_desc));
1277
1278 QFEC_LOG(QFEC_LOG_DBG,
1279 " %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
1280 __func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
1281
1282 return 0;
1283}
1284
1285/*
1286 * display buffer descriptors
1287 */
1288
/*
 * format one buffer descriptor's fields into buf; returns the snprintf
 * result (may exceed size if the output was truncated)
 */
static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
{
	return snprintf(buf, size,
		"%8p: %08x %08x %8p %8p %8p %8p %8p %x",
		p_bd, qfec_bd_status_get(p_bd),
		qfec_bd_ctl_get(p_bd), qfec_bd_pbuf_get(p_bd),
		qfec_bd_next_get(p_bd), qfec_bd_skbuf_get(p_bd),
		qfec_bd_virt_get(p_bd), qfec_bd_phys_get(p_bd),
		qfec_bd_last_bd(p_bd));
}
1299
/*
 * format up to MAX_N_BD descriptors starting at p_bd into buf,
 * marking the ring head ("< h") and tail ("< t") entries;
 * returns the number of characters written
 */
static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
	struct ring *p_ring, char *label)
{
	int l = 0;
	int n;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);

	/* l is 0 here, so passing 'count' is the full remaining space */
	l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
	if (!p_bd)
		return l;

	/* cap the dump length */
	n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;

	for (n = 0; n < n_bd; n++, p_bd++) {
		/* NOTE(review): snprintf returns the would-be length, so l
		 * can exceed count on truncation and count - l can go
		 * negative; callers pass PAGE_SIZE which appears ample for
		 * MAX_N_BD entries — confirm
		 */
		l += qfec_bd_fmt(&buf[l], count - l, p_bd);
		l += snprintf(&buf[l], count - l, "%s%s\n",
			(qfec_ring_head(p_ring) == n ? " < h" : ""),
			(qfec_ring_tail(p_ring) == n ? " < t" : ""));
	}

	return l;
}
1323
1324/*
1325 * display TX BDs
1326 */
1327static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
1328 char *buf)
1329{
1330 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1331 int count = PAGE_SIZE;
1332
1333 return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
1334 &priv->ring_tbd, "TX");
1335}
1336
1337/*
1338 * display RX BDs
1339 */
1340static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
1341 char *buf)
1342{
1343 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1344 int count = PAGE_SIZE;
1345
1346 return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
1347 &priv->ring_rbd, "RX");
1348}
1349
1350/*
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001351 * process timestamp values
1352 * The pbuf and next fields of the buffer descriptors are overwritten
1353 * with the timestamp high and low register values.
1354 *
1355 * The low register is incremented by the value in the subsec_increment
1356 * register and overflows at 0x8000 0000 causing the high register to
1357 * increment.
1358 *
1359 * The subsec_increment register is recommended to be set to the number
1360 * of nanosec corresponding to each clock tic, scaled by 2^31 / 10^9
1361 * (e.g. 40 * 2^32 / 10^9 = 85.9, or 86 for 25 MHz). However, the
1362 * rounding error in this case will result in a 1 sec error / ~14 mins.
1363 *
1364 * An alternate approach is used. The subsec_increment is set to 1,
1365 * and the concatenation of the 2 timestamp registers used to count
1366 * clock tics. The 63-bit result is manipulated to determine the number
1367 * of sec and ns.
1368 */
1369
1370/*
1371 * convert 19.2 MHz clock tics into sec/ns
1372 */
#define TS_LOW_REG_BITS 31

#define MILLION 1000000UL
#define BILLION 1000000000UL

#define F_CLK 19200000UL
#define F_CLK_PRE_SC 24
#define F_CLK_INV_Q 56
#define F_CLK_INV (((unsigned long long)1 << F_CLK_INV_Q) / F_CLK)
#define F_CLK_TO_NS_Q 25
#define F_CLK_TO_NS \
	(((((unsigned long long)1<<F_CLK_TO_NS_Q)*BILLION)+(F_CLK-1))/F_CLK)
#define US_TO_F_CLK_Q 20
#define US_TO_F_CLK \
	(((((unsigned long long)1<<US_TO_F_CLK_Q)*F_CLK)+(MILLION-1))/MILLION)

static inline void qfec_get_sec(uint64_t *cnt,
	uint32_t *sec, uint32_t *ns)
{
	unsigned long long whole;
	unsigned long long rem;

	/* whole seconds: divide by F_CLK with a fixed-point reciprocal;
	 * the pre-shift keeps the 64-bit multiply from overflowing
	 */
	whole = *cnt >> F_CLK_PRE_SC;
	whole *= F_CLK_INV;
	whole >>= F_CLK_INV_Q - F_CLK_PRE_SC;

	/* remainder in clock tics */
	rem = *cnt - (whole * F_CLK);

	/* the truncated reciprocal can undershoot by one second */
	if (rem >= F_CLK) {
		rem -= F_CLK;
		whole++;
	}

	*sec = whole;

	/* scale the leftover tics to nanoseconds */
	rem *= F_CLK_TO_NS;
	rem >>= F_CLK_TO_NS_Q;
	*ns = rem;
}
1412
1413/*
1414 * read ethernet timestamp registers, pass up raw register values
1415 * and values converted to sec/ns
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001416 */
static void qfec_read_timestamp(struct buf_desc *p_bd,
	struct skb_shared_hwtstamps *ts)
{
	unsigned long long cnt;
	unsigned int sec;
	unsigned int subsec;

	/* hardware overwrote the BD next/pbuf fields with the timestamp
	 * high and 31-bit low register values; concatenate them into a
	 * 63-bit tic count
	 */
	cnt = (unsigned long)qfec_bd_next_get(p_bd);
	cnt <<= TS_LOW_REG_BITS;
	cnt |= (unsigned long)qfec_bd_pbuf_get(p_bd);

	/* report raw counts as concatenated 63 bits */
	sec = cnt >> 32;
	subsec = cnt & 0xffffffff;

	ts->hwtstamp = ktime_set(sec, subsec);

	/* translate counts to sec and ns */
	qfec_get_sec(&cnt, &sec, &subsec);

	ts->syststamp = ktime_set(sec, subsec);
}
1439
1440/*
1441 * capture the current system time in the timestamp registers
1442 */
1443static int qfec_cmd(struct device *dev, struct device_attribute *attr,
1444 const char *buf, size_t count)
1445{
1446 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1447 struct timeval tv;
1448
1449 if (!strncmp(buf, "setTs", 5)) {
1450 unsigned long long cnt;
1451 uint32_t ts_hi;
1452 uint32_t ts_lo;
1453 unsigned long long subsec;
1454
1455 do_gettimeofday(&tv);
1456
1457 /* convert raw sec/usec to ns */
1458 subsec = tv.tv_usec;
1459 subsec *= US_TO_F_CLK;
1460 subsec >>= US_TO_F_CLK_Q;
1461
1462 cnt = tv.tv_sec;
1463 cnt *= F_CLK;
1464 cnt += subsec;
1465
1466 ts_hi = cnt >> 31;
1467 ts_lo = cnt & 0x7FFFFFFF;
1468
1469 qfec_reg_write(priv, TS_HI_UPDT_REG, ts_hi);
1470 qfec_reg_write(priv, TS_LO_UPDT_REG, ts_lo);
1471
1472 qfec_reg_write(priv, TS_CTL_REG,
1473 qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSINIT);
1474 } else
1475 pr_err("%s: unknown cmd, %s.\n", __func__, buf);
1476
1477 return strnlen(buf, count);
1478}
1479
1480/*
1481 * display ethernet tstamp and system time
1482 */
static int qfec_tstamp_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	int count = PAGE_SIZE;
	int l;
	struct timeval tv;
	unsigned long long cnt;
	uint32_t sec;
	uint32_t ns;
	uint32_t ts_hi;
	uint32_t ts_lo;

	/* insure that ts_hi didn't increment during read */
	do {
		ts_hi = qfec_reg_read(priv, TS_HIGH_REG);
		ts_lo = qfec_reg_read(priv, TS_LOW_REG);
	} while (ts_hi != qfec_reg_read(priv, TS_HIGH_REG));

	/* concatenate into a 63-bit tic count (low reg holds 31 bits) */
	cnt = ts_hi;
	cnt <<= TS_LOW_REG_BITS;
	cnt |= ts_lo;

	do_gettimeofday(&tv);

	/* raw 32-bit halves of the concatenated count, for display */
	ts_hi = cnt >> 32;
	ts_lo = cnt & 0xffffffff;

	/* convert tic count to seconds/nanoseconds */
	qfec_get_sec(&cnt, &sec, &ns);

	l = snprintf(buf, count,
		"%12u.%09u sec 0x%08x 0x%08x tstamp %12u.%06u time-of-day\n",
		sec, ns, ts_hi, ts_lo, (int)tv.tv_sec, (int)tv.tv_usec);

	return l;
}
1519
1520/*
1521 * free transmitted skbufs from buffer-descriptor no owned by HW
1522 */
static int qfec_tx_replenish(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	struct sk_buff *skb;
	unsigned long flags;

	CNTR_INC(priv, tx_replenish);

	/* walk from ring tail, reclaiming descriptors the hardware
	 * has released; lock excludes concurrent qfec_xmit()
	 */
	spin_lock_irqsave(&priv->xmit_lock, flags);

	while (!qfec_ring_empty(p_ring)) {
		if (qfec_bd_own(p_bd))
			break; /* done for now */

		skb = qfec_bd_skbuf_get(p_bd);
		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, tx_skb_null);
			break;
		}

		/* ack tx-complete / tx-buffer-unavailable interrupts */
		qfec_reg_write(priv, STATUS_REG,
			STATUS_REG_TU | STATUS_REG_TI);

		/* retrieve timestamp if requested */
		if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) {
			CNTR_INC(priv, ts_tx_rtn);
			qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			skb_tstamp_tx(skb, skb_hwtstamps(skb));
		}

		/* update statistics before freeing skb */
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;

		dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
			skb->len, DMA_TO_DEVICE);

		dev_kfree_skb_any(skb);
		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_tail_adv(p_ring);
		p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	}

	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	/* descriptors were freed; let the stack queue frames again */
	qfec_queue_start(dev);

	return 0;
}
1576
1577/*
1578 * clear ownership bits of all TX buf-desc and release the sk-bufs
1579 */
1580static void qfec_tx_timeout(struct net_device *dev)
1581{
1582 struct qfec_priv *priv = netdev_priv(dev);
1583 struct buf_desc *bd = priv->p_tbd;
1584 int n;
1585
1586 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1587 CNTR_INC(priv, tx_timeout);
1588
1589 for (n = 0; n < priv->n_tbd; n++, bd++)
1590 qfec_bd_own_clr(bd);
1591
1592 qfec_tx_replenish(dev);
1593}
1594
1595/*
1596 * rx() - process a received frame
1597 */
static void qfec_rx_int(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_rbd;
	struct buf_desc *p_bd = priv->p_latest_rbd;
	uint32_t desc_status;
	uint32_t mis_fr_reg;

	desc_status = qfec_bd_status_get(p_bd);
	mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);

	CNTR_INC(priv, rx_int);

	/* check that valid interrupt occurred */
	if (unlikely(desc_status & BUF_OWN)) {
		char s[100];

		qfec_bd_fmt(s, sizeof(s), p_bd);
		QFEC_LOG_ERR("%s: owned by DMA, %08x, %s\n", __func__,
			qfec_reg_read(priv, CUR_HOST_RX_DES_REG), s);
		CNTR_INC(priv, rx_owned);
		return;
	}

	/* accumulate missed-frame count (reg reset when read) */
	priv->stats.rx_missed_errors += mis_fr_reg
		& MIS_FR_REG_MISS_CNT;

	/* process all unowned frames */
	while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) {
		struct sk_buff *skb;
		struct buf_desc *p_bd_next;

		skb = qfec_bd_skbuf_get(p_bd);

		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, rx_skb_null);
			break;
		}

		/* cache coherency before skb->data is accessed */
		dma_unmap_single(&dev->dev,
			(dma_addr_t) qfec_bd_phys_get(p_bd),
			ETH_BUF_SIZE, DMA_FROM_DEVICE);
		prefetch(skb->data);

		if (unlikely(desc_status & BUF_RX_ES)) {
			/* hardware flagged a receive error; drop frame */
			priv->stats.rx_dropped++;
			CNTR_INC(priv, rx_dropped);
			dev_kfree_skb(skb);
		} else {
			/* ack the receive interrupt */
			qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);

			/* frame length is encoded in the BD status */
			skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);

			if (priv->state & timestamping) {
				CNTR_INC(priv, ts_rec);
				qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			}

			/* update statistics before freeing skb */
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += skb->len;

			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			if (NET_RX_DROP == netif_rx(skb)) {
				priv->stats.rx_dropped++;
				CNTR_INC(priv, rx_dropped);
			}
			CNTR_INC(priv, netif_rx_cntr);
		}

		/* advance to the next descriptor, wrapping at ring end */
		if (p_bd != priv->p_ending_rbd)
			p_bd_next = p_bd + 1;
		else
			p_bd_next = priv->p_rbd;
		desc_status = qfec_bd_status_get(p_bd_next);

		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_head_adv(p_ring);
		p_bd = p_bd_next;
	}

	priv->p_latest_rbd = p_bd;

	/* replenish bufs */
	while (!qfec_ring_empty(p_ring)) {
		if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
			break;
		qfec_ring_tail_adv(p_ring);
	}
}
1695
1696/*
1697 * isr() - interrupt service routine
1698 * determine cause of interrupt and invoke/schedule appropriate
1699 * processing or error handling
1700 */
1701#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
1702 if (status & interrupt) \
1703 CNTR_INC(priv, cntr)
1704
1705static irqreturn_t qfec_int(int irq, void *dev_id)
1706{
1707 struct net_device *dev = dev_id;
1708 struct qfec_priv *priv = netdev_priv(dev);
1709 uint32_t status = qfec_reg_read(priv, STATUS_REG);
1710 uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS;
1711
1712 QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);
1713
1714 /* abnormal interrupt */
1715 if (status & STATUS_REG_AIS) {
1716 QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
1717 __func__, status);
1718
1719 ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail);
1720 ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);
1721
1722 ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
1723 ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
1724 ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);
1725
1726 ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
1727 ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
1728 ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);
1729
1730 int_bits |= STATUS_REG_AIS_BITS;
1731 CNTR_INC(priv, abnorm_int);
1732 }
1733
1734 if (status & STATUS_REG_NIS)
1735 CNTR_INC(priv, norm_int);
1736
1737 /* receive interrupt */
1738 if (status & STATUS_REG_RI) {
1739 CNTR_INC(priv, rx_isr);
1740 qfec_rx_int(dev);
1741 }
1742
1743 /* transmit interrupt */
1744 if (status & STATUS_REG_TI) {
1745 CNTR_INC(priv, tx_isr);
1746 qfec_tx_replenish(dev);
1747 }
1748
1749 /* gmac interrupt */
1750 if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) {
1751 CNTR_INC(priv, gmac_isr);
1752 int_bits |= STATUS_REG_GMI;
1753 }
1754
1755 /* clear interrupts */
1756 qfec_reg_write(priv, STATUS_REG, int_bits);
1757 CNTR_INC(priv, isr);
1758
1759 return IRQ_HANDLED;
1760}
1761
1762/*
1763 * open () - register system resources (IRQ, DMA, ...)
1764 * turn on HW, perform device setup.
1765 */
static int qfec_open(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct buf_desc *p_bd;
	struct ring *p_ring;
	struct qfec_buf_desc *p_desc;
	int n;
	int res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);

	/* NOTE(review): dev was already passed to netdev_priv() and the
	 * log call above, so this null check cannot fire usefully
	 */
	if (!dev) {
		res = -EINVAL;
		goto err;
	}

	/* allocate TX/RX buffer-descriptors and buffers */

	res = qfec_mem_alloc(dev);
	if (res)
		goto err;

	/* initialize TX */
	p_desc = priv->bd_base;

	for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
		p_bd->p_desc = p_desc++;

		if (n == (priv->n_tbd - 1))
			qfec_bd_last_bd_set(p_bd);

		qfec_bd_own_clr(p_bd); /* clear ownership */
	}

	qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);

	/* request a tx-complete interrupt every tx_ic_mod-th frame */
	priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
	if (priv->tx_ic_mod == 0)
		priv->tx_ic_mod = 1;

	/* initialize RX buffer descriptors and allocate sk_bufs */
	p_ring = &priv->ring_rbd;
	qfec_ring_init(p_ring, priv->n_rbd, 0);
	qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);

	for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
		p_bd->p_desc = p_desc++;

		if (qfec_rbd_init(dev, p_bd))
			break;
		qfec_ring_tail_adv(p_ring);
	}

	priv->p_latest_rbd = priv->p_rbd;
	priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;

	/* config ptp clock */
	qfec_ptp_cfg(priv);

	/* configure PHY - must be set before reset/hw_init */
	qfec_intf_sel(priv, intfc_mii);

	/* initialize controller after BDs allocated */
	res = qfec_hw_init(priv);
	if (res)
		goto err1;

	/* get/set (primary) MAC address */
	qfec_set_adr_regs(priv, dev->dev_addr);
	qfec_set_rx_mode(dev);

	/* start phy monitor */
	QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
	netif_carrier_off(priv->net_dev);
	setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
	mod_timer(&priv->phy_tmr, jiffies + HZ);

	/* initialize interrupts */
	QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
	res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
	/* NOTE(review): on failure the phy timer armed above is left
	 * running until qfec_stop() — confirm intended
	 */
	if (res)
		goto err1;

	/* enable controller */
	qfec_hw_enable(priv);
	netif_start_queue(dev);

	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
		mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));

	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
	return 0;

err1:
	qfec_mem_dealloc(dev);
err:
	QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
	return res;
}
1865
1866/*
1867 * stop() - "reverse operations performed at open time"
1868 */
static int qfec_stop(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct buf_desc *p_bd;
	struct sk_buff *skb;
	int n;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	/* stop the phy poll before tearing down the hardware */
	del_timer_sync(&priv->phy_tmr);

	qfec_hw_disable(priv);
	qfec_queue_stop(dev);
	free_irq(dev->irq, dev);

	/* free all pending sk_bufs */
	for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
		skb = qfec_bd_skbuf_get(p_bd);
		if (skb)
			dev_kfree_skb(skb);
	}

	for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
		skb = qfec_bd_skbuf_get(p_bd);
		if (skb)
			dev_kfree_skb(skb);
	}

	/* release descriptor memory; qfec_open() reallocates it */
	qfec_mem_dealloc(dev);

	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);

	return 0;
}
1903
static int qfec_set_config(struct net_device *dev, struct ifmap *map)
{
	/* interface remapping is not supported; accept and ignore */
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
	return 0;
}
1909
1910/*
1911 * pass data from skbuf to buf-desc
1912 */
static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd;
	uint32_t ctrl = 0;
	int ret = NETDEV_TX_OK;
	unsigned long flags;

	CNTR_INC(priv, xmit);

	/* lock excludes concurrent qfec_tx_replenish() */
	spin_lock_irqsave(&priv->xmit_lock, flags);

	/* stop queuing if no resources available */
	if (qfec_ring_room(p_ring) == 0) {
		qfec_queue_stop(dev);
		CNTR_INC(priv, tx_no_resource);

		ret = NETDEV_TX_BUSY;
		goto done;
	}

	/* locate and save *sk_buff */
	p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
	qfec_bd_skbuf_set(p_bd, skb);

	/* set DMA ptr to sk_buff data and write cache to memory */
	qfec_bd_pbuf_set(p_bd, (void *)
	dma_map_single(&dev->dev,
		(void *)skb->data, skb->len, DMA_TO_DEVICE));

	ctrl = skb->len;
	/* request a completion interrupt only every tx_ic_mod-th frame */
	if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
		ctrl |= BUF_TX_IC; /* interrupt on complete */

	/* check if timestamping enabled and requested */
	if (priv->state & timestamping) {
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
			CNTR_INC(priv, ts_tx_en);
			ctrl |= BUF_TX_IC; /* interrupt on complete */
			ctrl |= BUF_TX_TTSE; /* enable timestamp */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
	}

	/* last descriptor: tell the hardware to wrap to ring start */
	if (qfec_bd_last_bd(p_bd))
		ctrl |= BUF_RX_RER;

	/* no gather, no multi buf frames */
	ctrl |= BUF_TX_FS | BUF_TX_LS; /* 1st and last segment */

	qfec_bd_ctl_wr(p_bd, ctrl);
	/* hand the descriptor to hardware only after ctrl is written */
	qfec_bd_status_set(p_bd, BUF_OWN);

	qfec_ring_head_adv(p_ring);
	qfec_reg_write(priv, TX_POLL_DEM_REG, 1); /* poll */

done:
	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	return ret;
}
1975
static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qfec_priv *priv = netdev_priv(dev);
	/* NOTE(review): SIOCSHWTSTAMP normally carries a user-space
	 * pointer in ifr->ifr_data that must be fetched with
	 * copy_from_user(); this casts the ifreq itself to
	 * hwtstamp_config — confirm against the expected callers
	 */
	struct hwtstamp_config *cfg = (struct hwtstamp_config *) ifr;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	if (cmd == SIOCSHWTSTAMP) {
		CNTR_INC(priv, ts_ioctl);
		QFEC_LOG(QFEC_LOG_DBG,
			"%s: SIOCSHWTSTAMP - %x flags %x tx %x rx\n",
			__func__, cfg->flags, cfg->tx_type, cfg->rx_filter);

		/* report timestamping of all tx and rx frames */
		cfg->flags = 0;
		cfg->tx_type = HWTSTAMP_TX_ON;
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;

		priv->state |= timestamping;
		qfec_reg_write(priv, TS_CTL_REG,
			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);

		return 0;
	}

	/* everything else is a standard MII ioctl */
	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
}
2002
2003static struct net_device_stats *qfec_get_stats(struct net_device *dev)
2004{
2005 struct qfec_priv *priv = netdev_priv(dev);
2006
2007 QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
2008
Rohit Vaswani0565a2d2011-09-15 12:53:07 -07002009 priv->stats.multicast = qfec_reg_read(priv, NUM_MULTCST_FRM_RCVD_G);
2010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002011 return &priv->stats;
2012}
2013
2014/*
2015 * accept new mac address
2016 */
2017static int qfec_set_mac_address(struct net_device *dev, void *p)
2018{
2019 struct qfec_priv *priv = netdev_priv(dev);
2020 struct sockaddr *addr = p;
2021
2022 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2023
2024 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2025
2026 qfec_set_adr_regs(priv, dev->dev_addr);
2027
2028 return 0;
2029}
2030
2031/*
2032 * read discontinuous MAC address from corrected fuse memory region
2033 */
2034
2035static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
2036{
2037 static int offset[] = { 0, 1, 2, 3, 4, 8 };
2038 int n;
2039
2040 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2041
2042 for (n = 0; n < nBytes; n++)
2043 buf[n] = ioread8(mac_base + offset[n]);
2044
2045 /* check that MAC programmed */
2046 if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) {
2047 QFEC_LOG_ERR("%s: null MAC address\n", __func__);
2048 return -ENODATA;
2049 }
2050
2051 return 0;
2052}
2053
2054/*
2055 * static definition of driver functions
2056 */
/* net_device callbacks; mtu and address validation are delegated to
 * the generic ethernet helpers
 */
static const struct net_device_ops qfec_netdev_ops = {
	.ndo_open = qfec_open,
	.ndo_stop = qfec_stop,
	.ndo_start_xmit = qfec_xmit,

	.ndo_do_ioctl = qfec_do_ioctl,
	.ndo_tx_timeout = qfec_tx_timeout,
	.ndo_set_mac_address = qfec_set_mac_address,
	.ndo_set_multicast_list = qfec_set_rx_mode,

	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,

	.ndo_get_stats = qfec_get_stats,
	.ndo_set_config = qfec_set_config,
};
2073
2074/*
2075 * ethtool functions
2076 */
2077
2078static int qfec_nway_reset(struct net_device *dev)
2079{
2080 struct qfec_priv *priv = netdev_priv(dev);
2081 return mii_nway_restart(&priv->mii);
2082}
2083
2084/*
2085 * speed, duplex, auto-neg settings
2086 */
2087static void qfec_ethtool_getpauseparam(struct net_device *dev,
2088 struct ethtool_pauseparam *pp)
2089{
2090 struct qfec_priv *priv = netdev_priv(dev);
2091 u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
2092 u32 advert;
2093
2094 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2095
2096 /* report current settings */
2097 pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
2098 pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
2099
2100 /* report if pause is being advertised */
2101 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
2102 pp->autoneg =
2103 (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2104}
2105
static int qfec_ethtool_setpauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pp)
{
	struct qfec_priv *priv = netdev_priv(dev);
	u32 advert;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
		pp->autoneg, pp->rx_pause, pp->tx_pause);

	/* start from the current advertisement with pause bits cleared */
	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
	advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* map the request onto the 802.3 pause advertisement bits:
	 *   rx requested (with or without tx): symmetric + asymmetric
	 *   tx only:                            asymmetric only
	 *   neither rx nor tx:                  symmetric only
	 */
	if (pp->autoneg) {
		if (pp->rx_pause)
			advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
		else if (pp->tx_pause)
			advert |= ADVERTISE_PAUSE_ASYM;
		else
			advert |= ADVERTISE_PAUSE_CAP;
	}

	qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);

	return 0;
}
2136
2137/*
2138 * ethtool ring parameter (-g/G) support
2139 */
2140
2141/*
2142 * setringparamam - change the tx/rx ring lengths
2143 */
2144#define MIN_RING_SIZE 3
2145#define MAX_RING_SIZE 1000
2146static int qfec_ethtool_setringparam(struct net_device *dev,
2147 struct ethtool_ringparam *ring)
2148{
2149 struct qfec_priv *priv = netdev_priv(dev);
2150 u32 timeout = 20;
2151
2152 /* notify stack the link is down */
2153 netif_carrier_off(dev);
2154
2155 /* allow tx to complete & free skbufs on the tx ring */
2156 do {
2157 usleep_range(10000, 100000);
2158 qfec_tx_replenish(dev);
2159
2160 if (timeout-- == 0) {
2161 QFEC_LOG_ERR("%s: timeout\n", __func__);
2162 return -ETIME;
2163 }
2164 } while (!qfec_ring_empty(&priv->ring_tbd));
2165
2166
2167 qfec_stop(dev);
2168
2169 /* set tx ring size */
2170 if (ring->tx_pending < MIN_RING_SIZE)
2171 ring->tx_pending = MIN_RING_SIZE;
2172 else if (ring->tx_pending > MAX_RING_SIZE)
2173 ring->tx_pending = MAX_RING_SIZE;
2174 priv->n_tbd = ring->tx_pending;
2175
2176 /* set rx ring size */
2177 if (ring->rx_pending < MIN_RING_SIZE)
2178 ring->rx_pending = MIN_RING_SIZE;
2179 else if (ring->rx_pending > MAX_RING_SIZE)
2180 ring->rx_pending = MAX_RING_SIZE;
2181 priv->n_rbd = ring->rx_pending;
2182
2183
2184 qfec_open(dev);
2185
2186 return 0;
2187}
2188
2189/*
2190 * getringparamam - returns local values
2191 */
2192static void qfec_ethtool_getringparam(struct net_device *dev,
2193 struct ethtool_ringparam *ring)
2194{
2195 struct qfec_priv *priv = netdev_priv(dev);
2196
2197 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2198
2199 ring->rx_max_pending = MAX_RING_SIZE;
2200 ring->rx_mini_max_pending = 0;
2201 ring->rx_jumbo_max_pending = 0;
2202 ring->tx_max_pending = MAX_RING_SIZE;
2203
2204 ring->rx_pending = priv->n_rbd;
2205 ring->rx_mini_pending = 0;
2206 ring->rx_jumbo_pending = 0;
2207 ring->tx_pending = priv->n_tbd;
2208}
2209
2210/*
2211 * speed, duplex, auto-neg settings
2212 */
2213static int
2214qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2215{
2216 struct qfec_priv *priv = netdev_priv(dev);
2217
2218 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2219
2220 cmd->maxrxpkt = priv->n_rbd;
2221 cmd->maxtxpkt = priv->n_tbd;
2222
2223 return mii_ethtool_gset(&priv->mii, cmd);
2224}
2225
static int
qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct qfec_priv *priv = netdev_priv(dev);

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	/* delegate speed/duplex/autoneg changes to the MII layer */
	return mii_ethtool_sset(&priv->mii, cmd);
}
2235
2236/*
2237 * msg/debug level
2238 */
static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
{
	/* report the driver-wide debug flag mask (QFEC_LOG_* bits) */
	return qfec_debug;
}
2243
static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	/* XOR rather than assign: each call toggles the given bits */
	qfec_debug ^= level;	/* toggle on/off */
}
2248
2249/*
2250 * register dump
2251 */
/* layout of the buffer produced by qfec_ethtool_getregs(): each
 * hardware block is copied to a fixed offset in the dump
 */
#define DMA_DMP_OFFSET 0x0000	/* DMA regs land at buffer offset 0 */
#define DMA_REG_OFFSET 0x1000	/* DMA block base in register space */
#define DMA_REG_LEN 23

#define MAC_DMP_OFFSET 0x0080
#define MAC_REG_OFFSET 0x0000
#define MAC_REG_LEN 55

#define TS_DMP_OFFSET 0x0180
#define TS_REG_OFFSET 0x0700
#define TS_REG_LEN 15

#define MDIO_DMP_OFFSET 0x0200
#define MDIO_REG_LEN 16		/* MDIO registers are 16 bits wide */

#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))

/* total size in bytes of the register dump */
static int qfec_ethtool_getregs_len(struct net_device *dev)
{
	return REG_SIZE;
}
2273
static void
qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
	void *buf)
{
	struct qfec_priv *priv = netdev_priv(dev);
	u32 *data = buf;
	u16 *data16;
	unsigned int i;
	unsigned int j;
	unsigned int n;

	memset(buf, 0, REG_SIZE);

	/* DMA block registers */
	j = DMA_DMP_OFFSET / sizeof(u32);
	for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* MAC block registers */
	j = MAC_DMP_OFFSET / sizeof(u32);
	for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* timestamp block registers */
	j = TS_DMP_OFFSET / sizeof(u32);
	for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* 16-bit MDIO registers, read from phy address 0 */
	data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
	for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
		data16[n++] = htons(qfec_mdio_read(dev, 0, i));

	regs->len = REG_SIZE;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
}
2307
2308/*
2309 * statistics
2310 * return counts of various ethernet activity.
2311 * many of these are same as in struct net_device_stats
2312 *
2313 * missed-frames indicates the number of attempts made by the ethernet
2314 * controller to write to a buffer-descriptor when the BD ownership
2315 * bit was not set. The rxfifooverflow counter (0x1D4) is not
2316 * available. The Missed Frame and Buffer Overflow Counter register
2317 * (0x1020) is used, but has only 16-bits and is reset when read.
2318 * It is read and updates the value in priv->stats.rx_missed_errors
2319 * in qfec_rx_int().
2320 */
/* labels for ethtool statistics; order must match qfec_stats_regs,
 * with one extra trailing entry ("RX Missed Frames") for the
 * software-maintained missed-frame count.
 * NOTE(review): "TX Pause Frames" and "RX Pause Frames" each appear
 * twice, matching registers 92 and 116 listed twice in
 * qfec_stats_regs — confirm intended
 */
static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
	"TX good/bad Bytes ",
	"TX Bytes ",
	"TX good/bad Frames ",
	"TX Bcast Frames ",
	"TX Mcast Frames ",
	"TX Unicast Frames ",
	"TX Pause Frames ",
	"TX Vlan Frames ",
	"TX Frames 64 ",
	"TX Frames 65-127 ",
	"TX Frames 128-255 ",
	"TX Frames 256-511 ",
	"TX Frames 512-1023 ",
	"TX Frames 1024+ ",
	"TX Pause Frames ",
	"TX Collisions ",
	"TX Late Collisions ",
	"TX Excessive Collisions ",

	"RX good/bad Bytes ",
	"RX Bytes ",
	"RX good/bad Frames ",
	"RX Bcast Frames ",
	"RX Mcast Frames ",
	"RX Unicast Frames ",
	"RX Pause Frames ",
	"RX Vlan Frames ",
	"RX Frames 64 ",
	"RX Frames 65-127 ",
	"RX Frames 128-255 ",
	"RX Frames 256-511 ",
	"RX Frames 512-1023 ",
	"RX Frames 1024+ ",
	"RX Pause Frames ",
	"RX Crc error Frames ",
	"RX Length error Frames ",
	"RX Alignment error Frames ",
	"RX Runt Frames ",
	"RX Oversize Frames ",
	"RX Missed Frames ",

};
2364
/* hardware statistics register indices (in units of 32-bit words
 * from the register base); order matches qfec_stats_strings
 */
static u32 qfec_stats_regs[] = {

	 69,     89,     70,     71,     72,     90,     92,     93,
	 73,     74,     75,     76,     77,     78,     92,     84,
	 86,     87,

	 97,     98,     96,     99,    100,    113,    116,    118,
	107,    108,    109,    110,    111,    112,    116,    101,
	114,    102,    103,    106
};
2375
2376static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
2377 char *buf)
2378{
2379 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
2380 int count = PAGE_SIZE;
2381 int l = 0;
2382 int n;
2383
2384 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
2385
2386 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) {
2387 l += snprintf(&buf[l], count - l, " %12u %s\n",
2388 qfec_reg_read(priv,
2389 qfec_stats_regs[n] * sizeof(uint32_t)),
2390 qfec_stats_strings[n]);
2391 }
2392
2393 return l;
2394}
2395
2396static int qfec_get_sset_count(struct net_device *dev, int sset)
2397{
2398 switch (sset) {
2399 case ETH_SS_STATS:
2400 return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */
2401
2402 default:
2403 return -EOPNOTSUPP;
2404 }
2405}
2406
2407static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
2408 u8 *buf)
2409{
2410 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__,
2411 sizeof(qfec_stats_strings));
2412
2413 memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
2414}
2415
2416static void qfec_ethtool_getstats(struct net_device *dev,
2417 struct ethtool_stats *stats, uint64_t *data)
2418{
2419 struct qfec_priv *priv = netdev_priv(dev);
2420 int j = 0;
2421 int n;
2422
2423 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
2424 data[j++] = qfec_reg_read(priv,
2425 qfec_stats_regs[n] * sizeof(uint32_t));
2426
2427 data[j++] = priv->stats.rx_missed_errors;
2428
2429 stats->n_stats = j;
2430}
2431
2432static void qfec_ethtool_getdrvinfo(struct net_device *dev,
2433 struct ethtool_drvinfo *info)
2434{
2435 strlcpy(info->driver, QFEC_NAME, sizeof(info->driver));
2436 strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
2437 strlcpy(info->bus_info, dev_name(dev->dev.parent),
2438 sizeof(info->bus_info));
2439
2440 info->eedump_len = 0;
2441 info->regdump_len = qfec_ethtool_getregs_len(dev);
2442}
2443
2444/*
2445 * ethtool ops table
2446 */
/* ethtool callback table; handlers are defined earlier in this file */
static const struct ethtool_ops qfec_ethtool_ops = {
	.nway_reset         = qfec_nway_reset,

	/* link configuration and identification */
	.get_settings       = qfec_ethtool_getsettings,
	.set_settings       = qfec_ethtool_setsettings,
	.get_link           = ethtool_op_get_link,
	.get_drvinfo        = qfec_ethtool_getdrvinfo,
	.get_msglevel       = qfec_ethtool_getmsglevel,
	.set_msglevel       = qfec_ethtool_setmsglevel,
	.get_regs_len       = qfec_ethtool_getregs_len,
	.get_regs           = qfec_ethtool_getregs,

	/* buffer-descriptor ring sizing */
	.get_ringparam      = qfec_ethtool_getringparam,
	.set_ringparam      = qfec_ethtool_setringparam,

	/* flow control */
	.get_pauseparam     = qfec_ethtool_getpauseparam,
	.set_pauseparam     = qfec_ethtool_setpauseparam,

	/* statistics (ethtool -S) */
	.get_sset_count     = qfec_get_sset_count,
	.get_strings        = qfec_ethtool_getstrings,
	.get_ethtool_stats  = qfec_ethtool_getstats,
};
2469
2470/*
2471 * create sysfs entries
2472 */
static DEVICE_ATTR(bd_tx,   0444, qfec_bd_tx_show,   NULL);	/* TX buffer descriptors */
static DEVICE_ATTR(bd_rx,   0444, qfec_bd_rx_show,   NULL);	/* RX buffer descriptors */
static DEVICE_ATTR(cfg,     0444, qfec_config_show,  NULL);	/* controller configuration */
static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);	/* clock registers */
/* NOTE(review): 0222 makes "cmd" writable by ANY user, not just root;
 * consider 0200 unless unprivileged access is deliberate. */
static DEVICE_ATTR(cmd,     0222, NULL,              qfec_cmd);
static DEVICE_ATTR(cntrs,   0444, qfec_cntrs_show,   NULL);	/* hardware counters */
static DEVICE_ATTR(reg,     0444, qfec_reg_show,     NULL);	/* MAC registers */
static DEVICE_ATTR(stats,   0444, qfec_stats_show,   NULL);	/* labeled statistics */
static DEVICE_ATTR(tstamp,  0444, qfec_tstamp_show,  NULL);	/* timestamp state */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002482
2483static void qfec_sysfs_create(struct net_device *dev)
2484{
2485 if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
2486 device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
2487 device_create_file(&(dev->dev), &dev_attr_cfg) ||
2488 device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002489 device_create_file(&(dev->dev), &dev_attr_cmd) ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002490 device_create_file(&(dev->dev), &dev_attr_cntrs) ||
2491 device_create_file(&(dev->dev), &dev_attr_reg) ||
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002492 device_create_file(&(dev->dev), &dev_attr_stats) ||
2493 device_create_file(&(dev->dev), &dev_attr_tstamp))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002494 pr_err("qfec_sysfs_create failed to create sysfs files\n");
2495}
2496
2497/*
2498 * map a specified resource
2499 */
2500static int qfec_map_resource(struct platform_device *plat, int resource,
2501 struct resource **priv_res,
2502 void **addr)
2503{
2504 struct resource *res;
2505
2506 QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
2507
2508 /* allocate region to access controller registers */
2509 *priv_res = res = platform_get_resource(plat, resource, 0);
2510 if (!res) {
2511 QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
2512 return -ENODEV;
2513 }
2514
2515 res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
2516 if (!res) {
2517 QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
2518 __func__, res->start, res->end - res->start);
2519 return -EBUSY;
2520 }
2521
2522 *addr = ioremap(res->start, res->end - res->start);
2523 if (!*addr)
2524 return -ENOMEM;
2525
2526 QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
2527 __func__, (void *)res->start, *addr);
2528
2529 return 0;
2530};
2531
2532/*
2533 * free allocated io regions
2534 */
/*
 * unmap and release an io region previously set up by
 * qfec_map_resource(); tolerates a NULL resource or mapping so it can
 * be called unconditionally from error/teardown paths.
 *
 * NOTE(review): the size `res->end - res->start` is one byte short of
 * resource_size(), but it matches the size used at request time in
 * qfec_map_resource() — change both together or not at all.
 */
static void qfec_free_res(struct resource *res, void *base)
{

	if (res)  {
		if (base)
			iounmap((void __iomem *)base);

		release_mem_region(res->start, res->end - res->start);
	}
};
2545
2546/*
2547 * probe function that obtain configuration info and allocate net_device
2548 */
2549static int __devinit qfec_probe(struct platform_device *plat)
2550{
2551 struct net_device *dev;
2552 struct qfec_priv *priv;
2553 int ret = 0;
2554
2555 /* allocate device */
2556 dev = alloc_etherdev(sizeof(struct qfec_priv));
2557 if (!dev) {
2558 QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
2559 ret = -ENOMEM;
2560 goto err;
2561 }
2562
2563 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n", __func__, (int)dev);
2564
2565 qfec_dev = dev;
2566 SET_NETDEV_DEV(dev, &plat->dev);
2567
2568 dev->netdev_ops = &qfec_netdev_ops;
2569 dev->ethtool_ops = &qfec_ethtool_ops;
2570 dev->watchdog_timeo = 2 * HZ;
2571 dev->irq = platform_get_irq(plat, 0);
2572
2573 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2574
2575 /* initialize private data */
2576 priv = (struct qfec_priv *)netdev_priv(dev);
2577 memset((void *)priv, 0, sizeof(priv));
2578
2579 priv->net_dev = dev;
2580 platform_set_drvdata(plat, dev);
2581
2582 priv->n_tbd = TX_BD_NUM;
2583 priv->n_rbd = RX_BD_NUM;
2584
2585 /* initialize phy structure */
2586 priv->mii.phy_id_mask = 0x1F;
2587 priv->mii.reg_num_mask = 0x1F;
2588 priv->mii.dev = dev;
2589 priv->mii.mdio_read = qfec_mdio_read;
2590 priv->mii.mdio_write = qfec_mdio_write;
2591
2592 /* map register regions */
2593 ret = qfec_map_resource(
2594 plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
2595 if (ret) {
2596 QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
2597 goto err1;
2598 }
2599
2600 ret = qfec_map_resource(
2601 plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
2602 if (ret) {
2603 QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
2604 goto err2;
2605 }
2606
2607 ret = qfec_map_resource(
2608 plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
2609 if (ret) {
2610 QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
2611 goto err3;
2612 }
2613
2614 /* initialize MAC addr */
2615 ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
2616 MAC_ADDR_SIZE);
2617 if (ret)
2618 goto err4;
2619
2620 QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
2621 __func__,
2622 dev->dev_addr[0], dev->dev_addr[1],
2623 dev->dev_addr[2], dev->dev_addr[3],
2624 dev->dev_addr[4], dev->dev_addr[5]);
2625
2626 ret = register_netdev(dev);
2627 if (ret) {
2628 QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
2629 goto err4;
2630 }
2631
2632 spin_lock_init(&priv->mdio_lock);
2633 spin_lock_init(&priv->xmit_lock);
2634 qfec_sysfs_create(dev);
2635
2636 return 0;
2637
2638 /* error handling */
2639err4:
2640 qfec_free_res(priv->fuse_res, priv->fuse_base);
2641err3:
2642 qfec_free_res(priv->clk_res, priv->clk_base);
2643err2:
2644 qfec_free_res(priv->mac_res, priv->mac_base);
2645err1:
2646 free_netdev(dev);
2647err:
2648 QFEC_LOG_ERR("%s: err\n", __func__);
2649 return ret;
2650}
2651
2652/*
2653 * module remove
2654 */
2655static int __devexit qfec_remove(struct platform_device *plat)
2656{
2657 struct net_device *dev = platform_get_drvdata(plat);
2658 struct qfec_priv *priv = netdev_priv(dev);
2659
2660 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2661
2662 platform_set_drvdata(plat, NULL);
2663
2664 qfec_free_res(priv->fuse_res, priv->fuse_base);
2665 qfec_free_res(priv->clk_res, priv->clk_base);
2666 qfec_free_res(priv->mac_res, priv->mac_base);
2667
2668 unregister_netdev(dev);
2669 free_netdev(dev);
2670
2671 return 0;
2672}
2673
2674/*
2675 * module support
2676 * the FSM9xxx is not a mobile device does not support power management
2677 */
2678
/* platform driver glue; matched against the "qfec" platform device */
static struct platform_driver qfec_driver = {
	.probe  = qfec_probe,
	.remove = __devexit_p(qfec_remove),
	.driver = {
		.name   = QFEC_NAME,
		.owner  = THIS_MODULE,
	},
};
2687
2688/*
2689 * module init
2690 */
2691static int __init qfec_init_module(void)
2692{
2693 int res;
2694
2695 QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
2696
2697 res = platform_driver_register(&qfec_driver);
2698
2699 QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
2700 __func__, res);
2701
2702 return res;
2703}
2704
2705/*
2706 * module exit
2707 */
/*
 * module exit: unregister the platform driver (probe/remove handle
 * per-device teardown).
 */
static void __exit qfec_exit_module(void)
{
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	platform_driver_unregister(&qfec_driver);
}
2714
/* module identification and entry/exit point registration */
MODULE_DESCRIPTION("FSM Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
MODULE_VERSION("1.0");

module_init(qfec_init_module);
module_exit(qfec_exit_module);