blob: fc278376050701853b590b46db2d21d477711487 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <linux/platform_device.h>

#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>	/* mark_bh */

#include <linux/netdevice.h>	/* struct device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/skbuff.h>

#include <linux/proc_fs.h>
#include <linux/timer.h>
#include <linux/mii.h>

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/inet.h>

#include "qfec.h"
34
35#define QFEC_NAME "qfec"
Rohit Vaswani73299b42011-12-16 13:38:02 -080036#define QFEC_DRV_VER "Nov 29 2011"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
38#define ETH_BUF_SIZE 0x600
39#define MAX_N_BD 50
40#define MAC_ADDR_SIZE 6
41
42#define RX_TX_BD_RATIO 8
Rohit Vaswani73299b42011-12-16 13:38:02 -080043#define TX_BD_NUM 256
44#define RX_BD_NUM 256
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#define TX_BD_TI_RATIO 4
Rohit Vaswani73299b42011-12-16 13:38:02 -080046#define MAX_MDIO_REG 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047
Rohit Vaswani73299b42011-12-16 13:38:02 -080048#define H_DPLX 0
49#define F_DPLX 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070050/*
51 * logging macros
52 */
53#define QFEC_LOG_PR 1
54#define QFEC_LOG_DBG 2
55#define QFEC_LOG_DBG2 4
56#define QFEC_LOG_MDIO_W 8
57#define QFEC_LOG_MDIO_R 16
Rohit Vaswani73299b42011-12-16 13:38:02 -080058#define QFEC_MII_EXP_MASK (EXPANSION_LCWP | EXPANSION_ENABLENPAGE \
59 | EXPANSION_NPCAPABLE)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070060
61static int qfec_debug = QFEC_LOG_PR;
62
63#ifdef QFEC_DEBUG
64# define QFEC_LOG(flag, ...) \
65 do { \
66 if (flag & qfec_debug) \
67 pr_info(__VA_ARGS__); \
68 } while (0)
69#else
70# define QFEC_LOG(flag, ...)
71#endif
72
73#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
74
75/*
76 * driver buffer-descriptor
77 * contains the 4 word HW descriptor plus an additional 4-words.
78 * (See the DSL bits in the BUS-Mode register).
79 */
#define BD_FLAG_LAST_BD 1

/*
 * driver-side wrapper around one HW descriptor: tracks the skb and the
 * virtual/physical buffer addresses the HW descriptor alone cannot hold
 */
struct buf_desc {
	struct qfec_buf_desc *p_desc;	/* the 4-word HW descriptor */
	struct sk_buff *skb;		/* skb whose data the BD points at */
	void *buf_virt_addr;		/* CPU address of the data buffer */
	void *buf_phys_addr;		/* DMA address of the data buffer */
	uint32_t last_bd_flag;		/* non-zero on the final BD of a ring */
};
89
90/*
91 *inline functions accessing non-struct qfec_buf_desc elements
92 */
93
94/* skb */
95static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
96{
97 return p_bd->skb;
98};
99
100static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
101{
102 p_bd->skb = p;
103};
104
105/* virtual addr */
106static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
107{
108 p_bd->buf_virt_addr = addr;
109};
110
111static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
112{
113 return p_bd->buf_virt_addr;
114};
115
116/* physical addr */
117static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
118{
119 p_bd->buf_phys_addr = addr;
120};
121
122static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
123{
124 return p_bd->buf_phys_addr;
125};
126
127/* last_bd_flag */
128static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
129{
130 return (p_bd->last_bd_flag != 0);
131};
132
133static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
134{
135 p_bd->last_bd_flag = BD_FLAG_LAST_BD;
136};
137
138/*
139 *inline functions accessing struct qfec_buf_desc elements
140 */
141
142/* ownership bit */
143static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
144{
145 return p_bd->p_desc->status & BUF_OWN;
146};
147
148static inline void qfec_bd_own_set(struct buf_desc *p_bd)
149{
150 p_bd->p_desc->status |= BUF_OWN ;
151};
152
153static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
154{
155 p_bd->p_desc->status &= ~(BUF_OWN);
156};
157
158static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
159{
160 return p_bd->p_desc->status;
161};
162
163static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
164{
165 p_bd->p_desc->status = status;
166};
167
168static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
169{
170 return BUF_RX_FL_GET((*p_bd->p_desc));
171};
172
173/* control register */
174static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
175{
176 p_bd->p_desc->ctl = 0;
177};
178
179static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
180{
181 return p_bd->p_desc->ctl;
182};
183
184static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
185{
186 p_bd->p_desc->ctl |= val;
187};
188
189static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
190{
191 p_bd->p_desc->ctl = val;
192};
193
194/* pbuf register */
195static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
196{
197 return p_bd->p_desc->p_buf;
198}
199
200static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
201{
202 p_bd->p_desc->p_buf = p;
203}
204
205/* next register */
206static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
207{
208 return p_bd->p_desc->next;
209};
210
211/*
212 * initialize an RX BD w/ a new buf
213 */
214static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
215{
216 struct sk_buff *skb;
217 void *p;
218 void *v;
219
220 /* allocate and record ptrs for sk buff */
221 skb = dev_alloc_skb(ETH_BUF_SIZE);
222 if (!skb)
223 goto err;
224
225 qfec_bd_skbuf_set(p_bd, skb);
226
227 v = skb_put(skb, ETH_BUF_SIZE);
228 qfec_bd_virt_set(p_bd, v);
229
230 p = (void *) dma_map_single(&dev->dev,
231 (void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
232 qfec_bd_pbuf_set(p_bd, p);
233 qfec_bd_phys_set(p_bd, p);
234
235 /* populate control register */
236 /* mark the last BD and set end-of-ring bit */
237 qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
238 (qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));
239
240 qfec_bd_status_set(p_bd, BUF_OWN);
241
242 if (!(qfec_debug & QFEC_LOG_DBG2))
243 return 0;
244
245 /* debug messages */
246 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);
247
248 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);
249
250 QFEC_LOG(QFEC_LOG_DBG2,
251 "%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
252 __func__, (void *)p_bd,
253 (void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
254 (void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
255 (void *)p);
256
257 return 0;
258
259err:
260 return -ENOMEM;
261};
262
263/*
264 * ring structure used to maintain indices of buffer-descriptor (BD) usage
265 *
266 * The RX BDs are normally all pre-allocated with buffers available to be
267 * DMA'd into with received frames. The head indicates the first BD/buffer
268 * containing a received frame, and the tail indicates the oldest BD/buffer
269 * that needs to be restored for use. Head and tail are both initialized
270 * to zero, and n_free is initialized to zero, since all BD are initialized.
271 *
272 * The TX BDs are normally available for use, only being initialized as
273 * TX frames are requested for transmission. The head indicates the
274 * first available BD, and the tail indicate the oldest BD that has
275 * not been acknowledged as transmitted. Head and tail are both initialized
276 * to zero, and n_free is initialized to len, since all are available for use.
277 */
struct ring {
	int head;	/* next entry to produce/use */
	int tail;	/* oldest entry to consume/restore */
	int n_free;	/* available entries; 0 = full, == len = empty */
	int len;	/* total number of entries in the ring */
};
284
285/* accessory in line functions for struct ring */
286static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
287{
288 p_ring->head = p_ring->tail = 0;
289 p_ring->len = size;
290 p_ring->n_free = free;
291}
292
293static inline int qfec_ring_full(struct ring *p_ring)
294{
295 return (p_ring->n_free == 0);
296};
297
298static inline int qfec_ring_empty(struct ring *p_ring)
299{
300 return (p_ring->n_free == p_ring->len);
301}
302
303static inline void qfec_ring_head_adv(struct ring *p_ring)
304{
Rohit Vaswani73299b42011-12-16 13:38:02 -0800305 if (++p_ring->head == p_ring->len)
306 p_ring->head = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700307 p_ring->n_free--;
308};
309
310static inline void qfec_ring_tail_adv(struct ring *p_ring)
311{
Rohit Vaswani73299b42011-12-16 13:38:02 -0800312 if (++p_ring->tail == p_ring->len)
313 p_ring->tail = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700314 p_ring->n_free++;
315};
316
317static inline int qfec_ring_head(struct ring *p_ring)
318{
319
320 return p_ring->head;
321};
322
323static inline int qfec_ring_tail(struct ring *p_ring)
324{
325 return p_ring->tail;
326};
327
328static inline int qfec_ring_room(struct ring *p_ring)
329{
330 return p_ring->n_free;
331};
332
333/*
334 * counters track normal and abnormal driver events and activity
335 */
/*
 * indices into qfec_priv.cntr[]; order must match cntr_name[] below.
 * The "half" marker splits the list into the two columns printed by
 * qfec_cntrs_show().
 */
enum cntr {
	isr = 0,
	fatal_bus,

	early_tx,
	tx_no_resource,
	tx_proc_stopped,
	tx_jabber_tmout,

	xmit,
	tx_int,
	tx_isr,
	tx_owned,
	tx_underflow,

	tx_replenish,
	tx_skb_null,
	tx_timeout,
	tx_too_large,

	gmac_isr,

	/* half */
	norm_int,
	abnorm_int,

	early_rx,
	rx_buf_unavail,
	rx_proc_stopped,
	rx_watchdog,

	netif_rx_cntr,
	rx_int,
	rx_isr,
	rx_owned,
	rx_overflow,

	rx_dropped,
	rx_skb_null,
	queue_start,
	queue_stop,

	rx_paddr_nok,
	ts_ioctl,
	ts_tx_en,
	ts_tx_rtn,

	ts_rec,
	cntr_last,	/* count of counters, not a counter itself */
};
386
/*
 * display names, one per enum cntr value; the trailing "" presumably pads
 * cntr_name[n+h] accesses in the two-column display — verify against
 * qfec_cntrs_show() before relying on it
 */
static char *cntr_name[] = {
	"isr",
	"fatal_bus",

	"early_tx",
	"tx_no_resource",
	"tx_proc_stopped",
	"tx_jabber_tmout",

	"xmit",
	"tx_int",
	"tx_isr",
	"tx_owned",
	"tx_underflow",

	"tx_replenish",
	"tx_skb_null",
	"tx_timeout",
	"tx_too_large",

	"gmac_isr",

	/* half */
	"norm_int",
	"abnorm_int",

	"early_rx",
	"rx_buf_unavail",
	"rx_proc_stopped",
	"rx_watchdog",

	"netif_rx",
	"rx_int",
	"rx_isr",
	"rx_owned",
	"rx_overflow",

	"rx_dropped",
	"rx_skb_null",
	"queue_start",
	"queue_stop",

	"rx_paddr_nok",
	"ts_ioctl",
	"ts_tx_en",
	"ts_tx_rtn",

	"ts_rec",
	""
};
437
438/*
439 * private data
440 */
441
442static struct net_device *qfec_dev;
443
/* driver state bits kept in qfec_priv.state */
enum qfec_state {
	timestamping = 0x04,	/* HW timestamping active */
};
447
/* per-device driver context, stored as netdev_priv() of the net_device */
struct qfec_priv {
	struct net_device *net_dev;	/* associated net_device */
	struct net_device_stats stats;	/* req statistics */

	struct device dev;

	spinlock_t xmit_lock;	/* serializes TX path */
	spinlock_t mdio_lock;	/* serializes MDIO read/write sequences */

	unsigned int state;	/* driver state (enum qfec_state bits) */

	unsigned int bd_size;	/* buf-desc alloc size */
	struct qfec_buf_desc *bd_base;	/* * qfec-buf-desc */
	dma_addr_t tbd_dma;	/* dma/phy-addr buf-desc */
	dma_addr_t rbd_dma;	/* dma/phy-addr buf-desc */

	struct resource *mac_res;
	void *mac_base;		/* mac (virt) base address */

	struct resource *clk_res;
	void *clk_base;		/* clk (virt) base address */

	struct resource *fuse_res;
	void *fuse_base;	/* mac addr fuses */

	unsigned int n_tbd;	/* # of TX buf-desc */
	struct ring ring_tbd;	/* TX ring */
	struct buf_desc *p_tbd;
	unsigned int tx_ic_mod;	/* (%) val for setting IC */

	unsigned int n_rbd;	/* # of RX buf-desc */
	struct ring ring_rbd;	/* RX ring */
	struct buf_desc *p_rbd;

	struct buf_desc *p_latest_rbd;
	struct buf_desc *p_ending_rbd;

	unsigned long cntr[cntr_last];	/* activity counters */

	struct mii_if_info mii;	/* used by mii lib */

	int mdio_clk;		/* phy mdio clock rate */
	int phy_id;		/* default PHY addr (0) */
	struct timer_list phy_tmr;	/* monitor PHY state */
};
493
494/*
495 * cntrs display
496 */
497
498static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
499 char *buf)
500{
501 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
502 int h = (cntr_last + 1) / 2;
503 int l;
504 int n;
505 int count = PAGE_SIZE;
506
507 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
508
509 l = snprintf(&buf[0], count, "%s:\n", __func__);
510 for (n = 0; n < h; n++) {
511 l += snprintf(&buf[l], count - l,
512 " %12lu %-16s %12lu %s\n",
513 priv->cntr[n], cntr_name[n],
514 priv->cntr[n+h], cntr_name[n+h]);
515 }
516
517 return l;
518}
519
520# define CNTR_INC(priv, name) (priv->cntr[name]++)
521
522/*
523 * functions that manage state
524 */
525static inline void qfec_queue_start(struct net_device *dev)
526{
527 struct qfec_priv *priv = netdev_priv(dev);
528
529 if (netif_queue_stopped(dev)) {
530 netif_wake_queue(dev);
531 CNTR_INC(priv, queue_start);
532 }
533};
534
535static inline void qfec_queue_stop(struct net_device *dev)
536{
537 struct qfec_priv *priv = netdev_priv(dev);
538
539 netif_stop_queue(dev);
540 CNTR_INC(priv, queue_stop);
541};
542
543/*
544 * functions to access and initialize the MAC registers
545 */
546static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
547{
548 return ioread32((void *) (priv->mac_base + reg));
549}
550
551static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
552{
553 uint32_t addr = (uint32_t)priv->mac_base + reg;
554
555 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
556 iowrite32(val, (void *)addr);
557}
558
559/*
560 * speed/duplex/pause settings
561 */
562static int qfec_config_show(struct device *dev, struct device_attribute *attr,
563 char *buf)
564{
565 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
566 int cfg = qfec_reg_read(priv, MAC_CONFIG_REG);
567 int flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
568 int l = 0;
569 int count = PAGE_SIZE;
570
571 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
572
573 l += snprintf(&buf[l], count, "%s:", __func__);
574
575 l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg,
576 (cfg & MAC_CONFIG_REG_PS)
577 ? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
578 cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
579 cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");
580
581 flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
582 l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow,
583 (flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
584 : ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
585 : ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));
586
587 l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
588 l += snprintf(&buf[l], count - l, "\n");
589 return l;
590}
591
592
593/*
594 * table and functions to initialize controller registers
595 */
596
/* one row of the register init/dump table */
struct reg_entry {
	unsigned int rdonly;	/* non-zero: never written by reg_init */
	unsigned int addr;	/* register byte offset */
	char *label;		/* name printed by qfec_reg_show */
	unsigned int val;	/* value written when !rdonly */
};
603
/*
 * controller register defaults, applied in order by qfec_reg_init()
 * and dumped by qfec_reg_show(); rdonly rows are listed for dump only
 */
static struct reg_entry qfec_reg_tbl[] = {
	{ 0, BUS_MODE_REG, "BUS_MODE_REG", BUS_MODE_REG_DEFAULT },
	{ 0, AXI_BUS_MODE_REG, "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
	{ 0, AXI_STATUS_REG, "AXI_STATUS_REG", 0 },

	{ 0, MAC_ADR_0_HIGH_REG, "MAC_ADR_0_HIGH_REG", 0x00000302 },
	{ 0, MAC_ADR_0_LOW_REG, "MAC_ADR_0_LOW_REG", 0x01350702 },

	{ 1, RX_DES_LST_ADR_REG, "RX_DES_LST_ADR_REG", 0 },
	{ 1, TX_DES_LST_ADR_REG, "TX_DES_LST_ADR_REG", 0 },
	{ 1, STATUS_REG, "STATUS_REG", 0 },
	{ 1, DEBUG_REG, "DEBUG_REG", 0 },

	{ 0, INTRP_EN_REG, "INTRP_EN_REG", QFEC_INTRP_SETUP},

	{ 1, CUR_HOST_TX_DES_REG, "CUR_HOST_TX_DES_REG", 0 },
	{ 1, CUR_HOST_RX_DES_REG, "CUR_HOST_RX_DES_REG", 0 },
	{ 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
	{ 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },

	{ 1, MAC_FR_FILTER_REG, "MAC_FR_FILTER_REG", 0 },

	{ 0, MAC_CONFIG_REG, "MAC_CONFIG_REG", MAC_CONFIG_REG_SPD_1G
						| MAC_CONFIG_REG_DM
						| MAC_CONFIG_REG_TE
						| MAC_CONFIG_REG_RE
						| MAC_CONFIG_REG_IPC },

	{ 1, INTRP_STATUS_REG, "INTRP_STATUS_REG", 0 },
	{ 1, INTRP_MASK_REG, "INTRP_MASK_REG", 0 },

	{ 0, OPER_MODE_REG, "OPER_MODE_REG", OPER_MODE_REG_DEFAULT },

	{ 1, GMII_ADR_REG, "GMII_ADR_REG", 0 },
	{ 1, GMII_DATA_REG, "GMII_DATA_REG", 0 },

	{ 0, MMC_INTR_MASK_RX_REG, "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF },
	{ 0, MMC_INTR_MASK_TX_REG, "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF },

	{ 1, TS_HIGH_REG, "TS_HIGH_REG", 0 },
	{ 1, TS_LOW_REG, "TS_LOW_REG", 0 },

	{ 1, TS_HI_UPDT_REG, "TS_HI_UPDATE_REG", 0 },
	{ 1, TS_LO_UPDT_REG, "TS_LO_UPDATE_REG", 0 },
	{ 0, TS_SUB_SEC_INCR_REG, "TS_SUB_SEC_INCR_REG", 1 },
	{ 0, TS_CTL_REG, "TS_CTL_REG", TS_CTL_TSENALL
						| TS_CTL_TSCTRLSSR
						| TS_CTL_TSINIT
						| TS_CTL_TSENA },
};
654
655static void qfec_reg_init(struct qfec_priv *priv)
656{
657 struct reg_entry *p = qfec_reg_tbl;
658 int n = ARRAY_SIZE(qfec_reg_tbl);
659
660 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
661
662 for (; n--; p++) {
663 if (!p->rdonly)
664 qfec_reg_write(priv, p->addr, p->val);
665 }
666}
667
668/*
669 * display registers thru sysfs
670 */
671static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
672 char *buf)
673{
674 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
675 struct reg_entry *p = qfec_reg_tbl;
676 int n = ARRAY_SIZE(qfec_reg_tbl);
677 int l = 0;
678 int count = PAGE_SIZE;
679
680 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
681
682 for (; n--; p++) {
683 l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n",
684 (void *)priv->mac_base + p->addr, p->addr,
685 qfec_reg_read(priv, p->addr), p->label);
686 }
687
688 return l;
689}
690
691/*
692 * set the MAC-0 address
693 */
694static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
695{
696 uint32_t h = 0;
697 uint32_t l = 0;
698
699 h = h << 8 | addr[5];
700 h = h << 8 | addr[4];
701
702 l = l << 8 | addr[3];
703 l = l << 8 | addr[2];
704 l = l << 8 | addr[1];
705 l = l << 8 | addr[0];
706
707 qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
708 qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l);
709
710 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
711}
712
713/*
Rohit Vaswani0565a2d2011-09-15 12:53:07 -0700714 * set up the RX filter
715 */
716static void qfec_set_rx_mode(struct net_device *dev)
717{
718 struct qfec_priv *priv = netdev_priv(dev);
719 uint32_t filter_conf;
720 int index;
721
722 /* Clear address filter entries */
723 for (index = 1; index < MAC_ADR_MAX; ++index) {
724 qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), 0);
725 qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), 0);
726 }
727
728 if (dev->flags & IFF_PROMISC) {
729 /* Receive all frames */
730 filter_conf = MAC_FR_FILTER_RA;
731 } else if ((dev->flags & IFF_MULTICAST) == 0) {
732 /* Unicast filtering only */
733 filter_conf = MAC_FR_FILTER_HPF;
734 } else if ((netdev_mc_count(dev) > MAC_ADR_MAX - 1) ||
735 (dev->flags & IFF_ALLMULTI)) {
736 /* Unicast filtering is enabled, Pass all multicast frames */
737 filter_conf = MAC_FR_FILTER_HPF | MAC_FR_FILTER_PM;
738 } else {
739 struct netdev_hw_addr *ha;
740
741 /* Both unicast and multicast filtering are enabled */
742 filter_conf = MAC_FR_FILTER_HPF;
743
744 index = 1;
745
746 netdev_for_each_mc_addr(ha, dev) {
747 uint32_t high, low;
748
749 high = (1 << 31) | (ha->addr[5] << 8) | (ha->addr[4]);
750 low = (ha->addr[3] << 24) | (ha->addr[2] << 16) |
751 (ha->addr[1] << 8) | (ha->addr[0]);
752
753 qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), high);
754 qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), low);
755
756 index++;
757 }
758 }
759
760 qfec_reg_write(priv, MAC_FR_FILTER_REG, filter_conf);
761}
762
763/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700764 * reset the controller
765 */
766
767#define QFEC_RESET_TIMEOUT 10000
768 /* reset should always clear but did not w/o test/delay
769 * in RgMii mode. there is no spec'd max timeout
770 */
771
772static int qfec_hw_reset(struct qfec_priv *priv)
773{
774 int timeout = QFEC_RESET_TIMEOUT;
775
776 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
777
778 qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);
779
780 while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
781 if (timeout-- == 0) {
782 QFEC_LOG_ERR("%s: timeout\n", __func__);
783 return -ETIME;
784 }
785
786 /* there were problems resetting the controller
787 * in RGMII mode when there wasn't sufficient
788 * delay between register reads
789 */
790 usleep_range(100, 200);
791 }
792
793 return 0;
794}
795
796/*
797 * initialize controller
798 */
/*
 * qfec_hw_init - soft-reset the controller, apply the register defaults
 * from qfec_reg_tbl, program the descriptor list base addresses, and
 * clear pending interrupt status.  Returns 0 or the reset error.
 */
static int qfec_hw_init(struct qfec_priv *priv)
{
	int res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	res = qfec_hw_reset(priv);
	if (res)
		return res;

	qfec_reg_init(priv);

	/* config buf-desc locations */
	qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
	qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);

	/* clear interrupts */
	qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
		| INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);

	if (priv->mii.supports_gmii) {
		/* Clear RGMII */
		qfec_reg_read(priv, SG_RG_SMII_STATUS_REG);
		/* Disable RGMII int */
		qfec_reg_write(priv, INTRP_MASK_REG, 1);
	}

	return res;
}
828
829/*
830 * en/disable controller
831 */
832static void qfec_hw_enable(struct qfec_priv *priv)
833{
834 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
835
836 qfec_reg_write(priv, OPER_MODE_REG,
837 qfec_reg_read(priv, OPER_MODE_REG)
838 | OPER_MODE_REG_ST | OPER_MODE_REG_SR);
839}
840
841static void qfec_hw_disable(struct qfec_priv *priv)
842{
843 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
844
845 qfec_reg_write(priv, OPER_MODE_REG,
846 qfec_reg_read(priv, OPER_MODE_REG)
847 & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
848}
849
850/*
851 * interface selection
852 */
/* clock/mux register values selecting one PHY interface mode */
struct intf_config {
	uint32_t intf_sel;	/* EMAC_PHY_INTF_SEL_REG value */
	uint32_t emac_ns;	/* EMAC_NS_REG value */
	uint32_t eth_x_en_ns;	/* ETH_X_EN_NS_REG value */
	uint32_t clkmux_sel;	/* EMAC_CLKMUX_SEL_REG value */
};
859
#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)

/* per-interface clock/mux settings, indexed by enum phy_intfc */
static struct intf_config intf_config_tbl[] = {
	{ EMAC_PHY_INTF_SEL_MII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_RGMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
							CLKMUX_REVMII }
};
869
870/*
871 * emac clk register read and write functions
872 */
873static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
874{
875 return ioread32((void *) (priv->clk_base + reg));
876}
877
878static inline void qfec_clkreg_write(struct qfec_priv *priv,
879 uint32_t reg, uint32_t val)
880{
881 uint32_t addr = (uint32_t)priv->clk_base + reg;
882
883 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
884 iowrite32(val, (void *)addr);
885}
886
887/*
888 * configure the PHY interface and clock routing and signal bits
889 */
/* PHY interface modes; values index intf_config_tbl[] */
enum phy_intfc {
	INTFC_MII = 0,
	INTFC_RGMII = 1,
	INTFC_REVMII = 2,
};
895
896static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
897{
898 struct intf_config *p;
899
900 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);
901
Rohit Vaswani73299b42011-12-16 13:38:02 -0800902 if (intfc > INTFC_REVMII) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700903 QFEC_LOG_ERR("%s: range\n", __func__);
904 return -ENXIO;
905 }
906
907 p = &intf_config_tbl[intfc];
908
909 qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
910 qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns);
911 qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns);
912 qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel);
913
914 return 0;
915}
916
917/*
918 * display registers thru proc-fs
919 */
/* clock-block registers dumped by qfec_clk_reg_show() */
static struct qfec_clk_reg {
	uint32_t offset;	/* byte offset from clk_base */
	char *label;		/* name printed alongside the value */
} qfec_clk_regs[] = {
	{ ETH_MD_REG, "ETH_MD_REG" },
	{ ETH_NS_REG, "ETH_NS_REG" },
	{ ETH_X_EN_NS_REG, "ETH_X_EN_NS_REG" },
	{ EMAC_PTP_MD_REG, "EMAC_PTP_MD_REG" },
	{ EMAC_PTP_NS_REG, "EMAC_PTP_NS_REG" },
	{ EMAC_NS_REG, "EMAC_NS_REG" },
	{ EMAC_TX_FS_REG, "EMAC_TX_FS_REG" },
	{ EMAC_RX_FS_REG, "EMAC_RX_FS_REG" },
	{ EMAC_PHY_INTF_SEL_REG, "EMAC_PHY_INTF_SEL_REG" },
	{ EMAC_PHY_ADDR_REG, "EMAC_PHY_ADDR_REG" },
	{ EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" },
	{ EMAC_CLKMUX_SEL_REG, "EMAC_CLKMUX_SEL_REG" },
};
937
938static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
939 char *buf)
940{
941 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
942 struct qfec_clk_reg *p = qfec_clk_regs;
943 int n = ARRAY_SIZE(qfec_clk_regs);
944 int l = 0;
945 int count = PAGE_SIZE;
946
947 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
948
949 for (; n--; p++) {
950 l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n",
951 (void *)priv->clk_base + p->offset, p->offset,
952 qfec_clkreg_read(priv, p->offset), p->label);
953 }
954
955 return l;
956}
957
958/*
959 * speed selection
960 */
961
/* one clock-divider configuration for a given link speed */
struct qfec_pll_cfg {
	uint32_t spd;		/* MAC_CONFIG_REG speed bits */
	uint32_t eth_md;	/* M [31:16], NOT 2*D [15:0] */
	uint32_t eth_ns;	/* NOT(M-N) [31:16], ctl bits [11:0] */
};
967
/* clock configurations indexed by enum speed (SPD_10/100/1000) */
static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
	/* 2.5 MHz */
	{ MAC_CONFIG_REG_SPD_10, ETH_MD_M(1) | ETH_MD_2D_N(100),
						ETH_NS_NM(100-1)
						| ETH_NS_MCNTR_EN
						| ETH_NS_MCNTR_MODE_DUAL
						| ETH_NS_PRE_DIV(0)
						| CLK_SRC_PLL_EMAC },
	/* 25 MHz */
	{ MAC_CONFIG_REG_SPD_100, ETH_MD_M(1) | ETH_MD_2D_N(10),
						ETH_NS_NM(10-1)
						| ETH_NS_MCNTR_EN
						| ETH_NS_MCNTR_MODE_DUAL
						| ETH_NS_PRE_DIV(0)
						| CLK_SRC_PLL_EMAC },
	/* 125 MHz */
	{MAC_CONFIG_REG_SPD_1G, 0, ETH_NS_PRE_DIV(1)
						| CLK_SRC_PLL_EMAC },
};
987
/* link speeds; values index qfec_pll_cfg_tbl[] */
enum speed {
	SPD_10 = 0,
	SPD_100 = 1,
	SPD_1000 = 2,
};
993
994/*
995 * configure the PHY interface and clock routing and signal bits
996 */
997static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
998 unsigned int dplx)
999{
1000 struct qfec_priv *priv = netdev_priv(dev);
1001 struct qfec_pll_cfg *p;
1002
1003 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);
1004
Rohit Vaswani73299b42011-12-16 13:38:02 -08001005 if (spd > SPD_1000) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001006 QFEC_LOG_ERR("%s: range\n", __func__);
1007 return -ENODEV;
1008 }
1009
1010 p = &qfec_pll_cfg_tbl[spd];
1011
1012 /* set the MAC speed bits */
1013 qfec_reg_write(priv, MAC_CONFIG_REG,
1014 (qfec_reg_read(priv, MAC_CONFIG_REG)
1015 & ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
Rohit Vaswani73299b42011-12-16 13:38:02 -08001016 | p->spd | (dplx ? MAC_CONFIG_REG_DM : H_DPLX));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001017
1018 qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
1019 qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);
1020
1021 return 0;
1022}
1023
1024/*
1025 * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
1026 */
1027
/* PTP clock configuration applied by qfec_ptp_cfg(); spd/eth_md unused */
static struct qfec_pll_cfg qfec_pll_ptp = {
	/* 19.2 MHz tcxo */
	0, 0, ETH_NS_PRE_DIV(0)
			| EMAC_PTP_NS_ROOT_EN
			| EMAC_PTP_NS_CLK_EN
			| CLK_SRC_TCXO
};
1035
#define PLLTEST_PAD_CFG 0x01E0
#define PLLTEST_PLL_7 0x3700

#define CLKTEST_REG 0x01EC
#define CLKTEST_EMAC_RX 0x3fc07f7a

/*
 * qfec_ptp_cfg - program the PTP clock source/dividers from qfec_pll_ptp
 * and route clocks to the test ports for verification; always returns 0
 */
static int qfec_ptp_cfg(struct qfec_priv *priv)
{
	struct qfec_pll_cfg *p = &qfec_pll_ptp;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
		__func__, p->eth_md, p->eth_ns);

	qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);

	/* configure HS/LS clk test ports to verify clks */
	qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX);
	qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);

	return 0;
}
1058
1059/*
1060 * MDIO operations
1061 */
1062
1063/*
1064 * wait reasonable amount of time for MDIO operation to complete, not busy
1065 */
1066static int qfec_mdio_busy(struct net_device *dev)
1067{
1068 int i;
1069
1070 for (i = 100; i > 0; i--) {
1071 if (!(qfec_reg_read(
1072 netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) {
1073 return 0;
1074 }
1075 udelay(1);
1076 }
1077
1078 return -ETIME;
1079}
1080
1081/*
1082 * initiate either a read or write MDIO operation
1083 */
1084
1085static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
1086{
1087 struct qfec_priv *priv = netdev_priv(dev);
1088 int res = 0;
1089
1090 /* insure phy not busy */
1091 res = qfec_mdio_busy(dev);
1092 if (res) {
1093 QFEC_LOG_ERR("%s: busy\n", __func__);
1094 goto done;
1095 }
1096
1097 /* initiate operation */
1098 qfec_reg_write(priv, GMII_ADR_REG,
1099 GMII_ADR_REG_ADR_SET(phy_id)
1100 | GMII_ADR_REG_REG_SET(reg)
1101 | GMII_ADR_REG_CSR_SET(priv->mdio_clk)
1102 | (wr ? GMII_ADR_REG_GW : 0)
1103 | GMII_ADR_REG_GB);
1104
1105 /* wait for operation to complete */
1106 res = qfec_mdio_busy(dev);
1107 if (res)
1108 QFEC_LOG_ERR("%s: timeout\n", __func__);
1109
1110done:
1111 return res;
1112}
1113
1114/*
1115 * read MDIO register
1116 */
1117static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
1118{
1119 struct qfec_priv *priv = netdev_priv(dev);
1120 int res = 0;
1121 unsigned long flags;
1122
1123 spin_lock_irqsave(&priv->mdio_lock, flags);
1124
1125 res = qfec_mdio_oper(dev, phy_id, reg, 0);
1126 if (res) {
1127 QFEC_LOG_ERR("%s: oper\n", __func__);
1128 goto done;
1129 }
1130
1131 res = qfec_reg_read(priv, GMII_DATA_REG);
1132 QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
1133 __func__, reg, res);
1134
1135done:
1136 spin_unlock_irqrestore(&priv->mdio_lock, flags);
1137 return res;
1138}
1139
1140/*
1141 * write MDIO register
1142 */
1143static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
1144 int val)
1145{
1146 struct qfec_priv *priv = netdev_priv(dev);
1147 unsigned long flags;
1148
1149 spin_lock_irqsave(&priv->mdio_lock, flags);
1150
1151 QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
1152 __func__, reg, val);
1153
1154 qfec_reg_write(priv, GMII_DATA_REG, val);
1155
1156 if (qfec_mdio_oper(dev, phy_id, reg, 1))
1157 QFEC_LOG_ERR("%s: oper\n", __func__);
1158
1159 spin_unlock_irqrestore(&priv->mdio_lock, flags);
1160}
1161
1162/*
Rohit Vaswani73299b42011-12-16 13:38:02 -08001163 * MDIO show
1164 */
1165static int qfec_mdio_show(struct device *dev, struct device_attribute *attr,
1166 char *buf)
1167{
1168 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1169 int n;
1170 int l = 0;
1171 int count = PAGE_SIZE;
1172
1173 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
1174
1175 for (n = 0; n < MAX_MDIO_REG; n++) {
1176 if (!(n % 8))
1177 l += snprintf(&buf[l], count - l, "\n %02x: ", n);
1178
1179 l += snprintf(&buf[l], count - l, " %04x",
1180 qfec_mdio_read(to_net_dev(dev), priv->phy_id, n));
1181 }
1182 l += snprintf(&buf[l], count - l, "\n");
1183
1184 return l;
1185}
1186
1187/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188 * get auto-negotiation results
1189 */
/*
 * link-partner ability masks; QFEC_100 previously listed LPA_100HALF
 * twice — the third term was almost certainly meant to be LPA_100BASE4,
 * matching its use in QFEC_100_FD
 */
#define QFEC_100 (LPA_100HALF | LPA_100FULL | LPA_100BASE4)
#define QFEC_100_FD (LPA_100FULL | LPA_100BASE4)
#define QFEC_10 (LPA_10HALF | LPA_10FULL)
#define QFEC_10_FD LPA_10FULL
1194
static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
{
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
	uint32_t lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
	uint32_t mastCtrl = qfec_mdio_read(dev, priv->phy_id, MII_CTRL1000);
	uint32_t mastStat = qfec_mdio_read(dev, priv->phy_id, MII_STAT1000);
	uint32_t anExp = qfec_mdio_read(dev, priv->phy_id, MII_EXPANSION);
	/* abilities common to both link partners */
	uint32_t status = advert & lpa;
	uint32_t flow;

	/* gigabit result: both sides must advertise the same 1000 Mb/s
	 * ability and the expansion bits must indicate a valid exchange.
	 * If no common ability matches below, *spd/*dplx keep the
	 * caller-supplied defaults. */
	if (priv->mii.supports_gmii) {
		if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK)
			&& (mastCtrl & ADVERTISE_1000FULL)
			&& (mastStat & LPA_1000FULL)) {
			*spd = SPD_1000;
			*dplx = F_DPLX;
			goto pause;
		}

		else if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK)
			&& (mastCtrl & ADVERTISE_1000HALF)
			&& (mastStat & LPA_1000HALF)) {
			*spd = SPD_1000;
			*dplx = H_DPLX;
			goto pause;
		}
	}

	/* mii speeds */
	if (status & QFEC_100) {
		*spd = SPD_100;
		*dplx = status & QFEC_100_FD ? F_DPLX : H_DPLX;
	}

	else if (status & QFEC_10) {
		*spd = SPD_10;
		*dplx = status & QFEC_10_FD ? F_DPLX : H_DPLX;
	}

	/* check pause: program MAC flow control from the negotiated
	 * symmetric/asymmetric pause bits */
pause:
	flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
	flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);

	if (status & ADVERTISE_PAUSE_CAP) {
		/* both sides support symmetric pause */
		flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
	} else if (status & ADVERTISE_PAUSE_ASYM) {
		/* asymmetric: direction depends on which side is
		 * pause-capable */
		if (lpa & ADVERTISE_PAUSE_CAP)
			flow |= FLOW_CONTROL_TFE;
		else if (advert & ADVERTISE_PAUSE_CAP)
			flow |= FLOW_CONTROL_RFE;
	}

	qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
}
1251
1252/*
1253 * monitor phy status, and process auto-neg results when changed
1254 */
1255
static void qfec_phy_monitor(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct qfec_priv *priv = netdev_priv(dev);
	/* defaults used if qfec_get_an() finds no common ability;
	 * NOTE(review): spd is seeded with a duplex constant
	 * (H_DPLX == 0) rather than a SPD_* value - confirm the
	 * intended default speed */
	unsigned int spd = H_DPLX;
	unsigned int dplx = F_DPLX;

	/* re-arm: this timer callback polls the PHY once per second */
	mod_timer(&priv->phy_tmr, jiffies + HZ);

	/* link up transition: decode autoneg results, configure the MAC
	 * speed/duplex, then tell the stack the carrier is present */
	if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) {
		qfec_get_an(dev, &spd, &dplx);
		qfec_speed_cfg(dev, spd, dplx);
		QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
			__func__, spd, dplx);

		netif_carrier_on(dev);
	}

	/* link down transition: only the stack needs to know */
	else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) {
		QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
		netif_carrier_off(dev);
	}
}
1279
1280/*
1281 * dealloc buffer descriptor memory
1282 */
1283
1284static void qfec_mem_dealloc(struct net_device *dev)
1285{
1286 struct qfec_priv *priv = netdev_priv(dev);
1287
1288 dma_free_coherent(&dev->dev,
1289 priv->bd_size, priv->bd_base, priv->tbd_dma);
1290 priv->bd_base = 0;
1291}
1292
1293/*
1294 * allocate shared device memory for TX/RX buf-desc (and buffers)
1295 */
1296
1297static int qfec_mem_alloc(struct net_device *dev)
1298{
1299 struct qfec_priv *priv = netdev_priv(dev);
1300
1301 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1302
1303 priv->bd_size =
1304 (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
1305
1306 priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
1307 if (!priv->p_tbd) {
1308 QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
1309 return -ENOMEM;
1310 }
1311
1312 priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
1313 if (!priv->p_rbd) {
1314 QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
1315 return -ENOMEM;
1316 }
1317
1318 /* alloc mem for buf-desc, if not already alloc'd */
1319 if (!priv->bd_base) {
1320 priv->bd_base = dma_alloc_coherent(&dev->dev,
1321 priv->bd_size, &priv->tbd_dma,
1322 GFP_KERNEL | __GFP_DMA);
1323 }
1324
1325 if (!priv->bd_base) {
1326 QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
1327 return -ENOMEM;
1328 }
1329
1330 priv->rbd_dma = priv->tbd_dma
1331 + (priv->n_tbd * sizeof(struct qfec_buf_desc));
1332
1333 QFEC_LOG(QFEC_LOG_DBG,
1334 " %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
1335 __func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
1336
1337 return 0;
1338}
1339
1340/*
1341 * display buffer descriptors
1342 */
1343
static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
{
	/* one-line dump of a buf-desc: its address, HW status and ctl
	 * words, HW/SW pointer fields and the last-BD flag; returns the
	 * snprintf result (would-be length, may exceed size) */
	return snprintf(buf, size,
		"%8p: %08x %08x %8p %8p %8p %8p %8p %x",
		p_bd, qfec_bd_status_get(p_bd),
		qfec_bd_ctl_get(p_bd), qfec_bd_pbuf_get(p_bd),
		qfec_bd_next_get(p_bd), qfec_bd_skbuf_get(p_bd),
		qfec_bd_virt_get(p_bd), qfec_bd_phys_get(p_bd),
		qfec_bd_last_bd(p_bd));
}
1354
1355static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
1356 struct ring *p_ring, char *label)
1357{
1358 int l = 0;
1359 int n;
1360
1361 QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
1362
1363 l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
1364 if (!p_bd)
1365 return l;
1366
1367 n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
1368
1369 for (n = 0; n < n_bd; n++, p_bd++) {
1370 l += qfec_bd_fmt(&buf[l], count - l, p_bd);
1371 l += snprintf(&buf[l], count - l, "%s%s\n",
1372 (qfec_ring_head(p_ring) == n ? " < h" : ""),
1373 (qfec_ring_tail(p_ring) == n ? " < t" : ""));
1374 }
1375
1376 return l;
1377}
1378
1379/*
1380 * display TX BDs
1381 */
1382static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
1383 char *buf)
1384{
1385 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1386 int count = PAGE_SIZE;
1387
1388 return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
1389 &priv->ring_tbd, "TX");
1390}
1391
1392/*
1393 * display RX BDs
1394 */
1395static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
1396 char *buf)
1397{
1398 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1399 int count = PAGE_SIZE;
1400
1401 return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
1402 &priv->ring_rbd, "RX");
1403}
1404
1405/*
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07001406 * process timestamp values
1407 * The pbuf and next fields of the buffer descriptors are overwritten
1408 * with the timestamp high and low register values.
1409 *
1410 * The low register is incremented by the value in the subsec_increment
1411 * register and overflows at 0x8000 0000 causing the high register to
1412 * increment.
1413 *
1414 * The subsec_increment register is recommended to be set to the number
1415 * of nanosec corresponding to each clock tic, scaled by 2^31 / 10^9
1416 * (e.g. 40 * 2^32 / 10^9 = 85.9, or 86 for 25 MHz). However, the
1417 * rounding error in this case will result in a 1 sec error / ~14 mins.
1418 *
1419 * An alternate approach is used. The subsec_increment is set to 1,
1420 * and the concatenation of the 2 timestamp registers used to count
1421 * clock tics. The 63-bit result is manipulated to determine the number
1422 * of sec and ns.
1423 */
1424
1425/*
1426 * convert 19.2 MHz clock tics into sec/ns
1427 */
#define TS_LOW_REG_BITS 31

#define MILLION 1000000UL
#define BILLION 1000000000UL

#define F_CLK 19200000UL
#define F_CLK_PRE_SC 24
#define F_CLK_INV_Q 56
#define F_CLK_INV (((unsigned long long)1 << F_CLK_INV_Q) / F_CLK)
#define F_CLK_TO_NS_Q 25
#define F_CLK_TO_NS \
	(((((unsigned long long)1<<F_CLK_TO_NS_Q)*BILLION)+(F_CLK-1))/F_CLK)
#define US_TO_F_CLK_Q 20
#define US_TO_F_CLK \
	(((((unsigned long long)1<<US_TO_F_CLK_Q)*F_CLK)+(MILLION-1))/MILLION)

/*
 * split a 19.2 MHz tick count into whole seconds and nanoseconds
 * using fixed-point reciprocal arithmetic (no 64-bit division)
 */
static inline void qfec_get_sec(uint64_t *cnt,
			    uint32_t *sec, uint32_t *ns)
{
	unsigned long long ticks = *cnt;
	unsigned long long secs;
	unsigned long long frac;

	/* secs ~= ticks / F_CLK, computed as a multiply by the
	 * pre-scaled reciprocal of the clock frequency */
	secs = ((ticks >> F_CLK_PRE_SC) * F_CLK_INV)
		>> (F_CLK_INV_Q - F_CLK_PRE_SC);

	/* leftover clock tics; the estimate can be low by one second */
	frac = ticks - (secs * F_CLK);
	if (frac >= F_CLK) {
		frac -= F_CLK;
		secs += 1;
	}

	*sec = secs;

	/* scale the sub-second tics to nanoseconds */
	*ns = (frac * F_CLK_TO_NS) >> F_CLK_TO_NS_Q;
}
1467
1468/*
1469 * read ethernet timestamp registers, pass up raw register values
1470 * and values converted to sec/ns
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001471 */
static void qfec_read_timestamp(struct buf_desc *p_bd,
	struct skb_shared_hwtstamps *ts)
{
	unsigned long long cnt;
	unsigned int sec;
	unsigned int subsec;

	/* the HW overwrote the 'next' field with TS_HIGH and 'pbuf'
	 * with TS_LOW; splice them into one 63-bit tick count (the low
	 * register carries 31 bits) */
	cnt = (unsigned long)qfec_bd_next_get(p_bd);
	cnt <<= TS_LOW_REG_BITS;
	cnt |= (unsigned long)qfec_bd_pbuf_get(p_bd);

	/* report raw counts as concatenated 63 bits */
	sec = cnt >> 32;
	subsec = cnt & 0xffffffff;

	ts->hwtstamp = ktime_set(sec, subsec);

	/* translate counts to sec and ns */
	qfec_get_sec(&cnt, &sec, &subsec);

	ts->syststamp = ktime_set(sec, subsec);
}
1494
1495/*
1496 * capture the current system time in the timestamp registers
1497 */
static int qfec_cmd(struct device *dev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	struct timeval tv;

	/* sysfs store handler; "setTs" loads the current time-of-day,
	 * expressed as 19.2 MHz clock tics, into the HW timestamp
	 * counter */
	if (!strncmp(buf, "setTs", 5)) {
		unsigned long long cnt;
		uint32_t ts_hi;
		uint32_t ts_lo;
		unsigned long long subsec;

		do_gettimeofday(&tv);

		/* convert raw usec to clock tics */
		subsec = tv.tv_usec;
		subsec *= US_TO_F_CLK;
		subsec >>= US_TO_F_CLK_Q;

		cnt = tv.tv_sec;
		cnt *= F_CLK;
		cnt += subsec;

		/* the low register holds only 31 bits of the count */
		ts_hi = cnt >> 31;
		ts_lo = cnt & 0x7FFFFFFF;

		qfec_reg_write(priv, TS_HI_UPDT_REG, ts_hi);
		qfec_reg_write(priv, TS_LO_UPDT_REG, ts_lo);

		/* TSINIT latches the staged hi/lo values into the counter */
		qfec_reg_write(priv, TS_CTL_REG,
			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSINIT);
	} else
		pr_err("%s: unknown cmd, %s.\n", __func__, buf);

	return strnlen(buf, count);
}
1534
1535/*
1536 * display ethernet tstamp and system time
1537 */
static int qfec_tstamp_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
	int count = PAGE_SIZE;
	int l;
	struct timeval tv;
	unsigned long long cnt;
	uint32_t sec;
	uint32_t ns;
	uint32_t ts_hi;
	uint32_t ts_lo;

	/* insure that ts_hi didn't increment during read */
	do {
		ts_hi = qfec_reg_read(priv, TS_HIGH_REG);
		ts_lo = qfec_reg_read(priv, TS_LOW_REG);
	} while (ts_hi != qfec_reg_read(priv, TS_HIGH_REG));

	/* concatenate the 31-bit low register under the high register */
	cnt = ts_hi;
	cnt <<= TS_LOW_REG_BITS;
	cnt |= ts_lo;

	do_gettimeofday(&tv);

	/* raw count reported as two 32-bit halves */
	ts_hi = cnt >> 32;
	ts_lo = cnt & 0xffffffff;

	/* and also converted to sec/ns for comparison with gettimeofday */
	qfec_get_sec(&cnt, &sec, &ns);

	l = snprintf(buf, count,
		"%12u.%09u sec 0x%08x 0x%08x tstamp %12u.%06u time-of-day\n",
		sec, ns, ts_hi, ts_lo, (int)tv.tv_sec, (int)tv.tv_usec);

	return l;
}
1574
1575/*
1576 * free transmitted skbufs from buffer-descriptor no owned by HW
1577 */
static int qfec_tx_replenish(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	struct sk_buff *skb;
	unsigned long flags;

	CNTR_INC(priv, tx_replenish);

	/* xmit_lock also guards this ring from qfec_xmit() */
	spin_lock_irqsave(&priv->xmit_lock, flags);

	/* walk from the ring tail, reclaiming every descriptor the DMA
	 * engine has released (ownership bit clear) */
	while (!qfec_ring_empty(p_ring)) {
		if (qfec_bd_own(p_bd))
			break; /* done for now */

		skb = qfec_bd_skbuf_get(p_bd);
		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, tx_skb_null);
			break;
		}

		/* ack the tx-complete / tx-unavailable interrupt causes */
		qfec_reg_write(priv, STATUS_REG,
			STATUS_REG_TU | STATUS_REG_TI);

		/* retrieve timestamp if requested */
		if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) {
			CNTR_INC(priv, ts_tx_rtn);
			qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			skb_tstamp_tx(skb, skb_hwtstamps(skb));
		}

		/* update statistics before freeing skb */
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;

		dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
			skb->len, DMA_TO_DEVICE);

		dev_kfree_skb_any(skb);
		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_tail_adv(p_ring);
		p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	}

	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	/* descriptors may have been freed; let the stack resume TX */
	qfec_queue_start(dev);

	return 0;
}
1631
1632/*
1633 * clear ownership bits of all TX buf-desc and release the sk-bufs
1634 */
1635static void qfec_tx_timeout(struct net_device *dev)
1636{
1637 struct qfec_priv *priv = netdev_priv(dev);
1638 struct buf_desc *bd = priv->p_tbd;
1639 int n;
1640
1641 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1642 CNTR_INC(priv, tx_timeout);
1643
1644 for (n = 0; n < priv->n_tbd; n++, bd++)
1645 qfec_bd_own_clr(bd);
1646
1647 qfec_tx_replenish(dev);
1648}
1649
1650/*
1651 * rx() - process a received frame
1652 */
static void qfec_rx_int(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_rbd;
	struct buf_desc *p_bd = priv->p_latest_rbd;
	uint32_t desc_status;
	uint32_t mis_fr_reg;

	desc_status = qfec_bd_status_get(p_bd);
	mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);

	CNTR_INC(priv, rx_int);

	/* check that valid interrupt occurred: if the current descriptor
	 * is still HW-owned there is nothing to process */
	if (unlikely(desc_status & BUF_OWN))
		return;

	/* accumulate missed-frame count (reg reset when read) */
	priv->stats.rx_missed_errors += mis_fr_reg
		& MIS_FR_REG_MISS_CNT;

	/* process all unowned frames */
	while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) {
		struct sk_buff *skb;
		struct buf_desc *p_bd_next;

		skb = qfec_bd_skbuf_get(p_bd);

		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, rx_skb_null);
			break;
		}

		/* cache coherency before skb->data is accessed */
		dma_unmap_single(&dev->dev,
			(dma_addr_t) qfec_bd_phys_get(p_bd),
			ETH_BUF_SIZE, DMA_FROM_DEVICE);
		prefetch(skb->data);

		/* error summary set: count and drop the frame */
		if (unlikely(desc_status & BUF_RX_ES)) {
			priv->stats.rx_dropped++;
			CNTR_INC(priv, rx_dropped);
			dev_kfree_skb(skb);
		} else {
			qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);

			/* frame length comes from the status word */
			skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);

			if (priv->state & timestamping) {
				CNTR_INC(priv, ts_rec);
				qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			}

			/* update statistics before freeing skb */
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += skb->len;

			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* hand the frame to the stack */
			if (NET_RX_DROP == netif_rx(skb)) {
				priv->stats.rx_dropped++;
				CNTR_INC(priv, rx_dropped);
			}
			CNTR_INC(priv, netif_rx_cntr);
		}

		/* advance to the next descriptor, wrapping at ring end */
		if (p_bd != priv->p_ending_rbd)
			p_bd_next = p_bd + 1;
		else
			p_bd_next = priv->p_rbd;
		desc_status = qfec_bd_status_get(p_bd_next);

		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_head_adv(p_ring);
		p_bd = p_bd_next;
	}

	priv->p_latest_rbd = p_bd;

	/* replenish bufs: re-arm consumed descriptors with fresh skbs */
	while (!qfec_ring_empty(p_ring)) {
		if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
			break;
		qfec_ring_tail_adv(p_ring);
	}

	/* ack the receive interrupt */
	qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
}
1745
1746/*
1747 * isr() - interrupt service routine
1748 * determine cause of interrupt and invoke/schedule appropriate
1749 * processing or error handling
1750 */
/* bump the named error counter when its status bit is set; the
 * do-while(0) wrapper makes the macro a single statement so it is
 * safe inside if/else chains (no dangling-else hazard) and requires
 * the terminating semicolon at each call site */
#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
	do { \
		if ((status) & (interrupt)) \
			CNTR_INC(priv, cntr); \
	} while (0)
1754
static irqreturn_t qfec_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t status = qfec_reg_read(priv, STATUS_REG);
	/* bits to write back to STATUS_REG to ack the interrupt */
	uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);

	/* abnormal interrupt: count each error cause individually */
	if (status & STATUS_REG_AIS) {
		QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
			__func__, status);

		ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail);
		ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);

		ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
		ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
		ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);

		ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
		ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
		ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);

		int_bits |= STATUS_REG_AIS_BITS;
		CNTR_INC(priv, abnorm_int);
	}

	if (status & STATUS_REG_NIS)
		CNTR_INC(priv, norm_int);

	/* receive interrupt */
	if (status & STATUS_REG_RI) {
		CNTR_INC(priv, rx_isr);
		qfec_rx_int(dev);
	}

	/* transmit interrupt */
	if (status & STATUS_REG_TI) {
		CNTR_INC(priv, tx_isr);
		qfec_tx_replenish(dev);
	}

	/* gmac interrupt; the SMII status register read presumably acks
	 * the condition in HW - TODO confirm against the GMAC manual */
	if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) {
		status &= ~(STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI);
		CNTR_INC(priv, gmac_isr);
		int_bits |= STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI;
		qfec_reg_read(priv, SG_RG_SMII_STATUS_REG);
	}

	/* clear interrupts */
	qfec_reg_write(priv, STATUS_REG, int_bits);
	CNTR_INC(priv, isr);

	return IRQ_HANDLED;
}
1813
1814/*
1815 * open () - register system resources (IRQ, DMA, ...)
1816 * turn on HW, perform device setup.
1817 */
static int qfec_open(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct buf_desc *p_bd;
	struct ring *p_ring;
	struct qfec_buf_desc *p_desc;
	int n;
	int res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);

	/* NOTE(review): dev was already passed to netdev_priv() and the
	 * log call above, so this null check comes too late to protect
	 * anything - confirm whether it can be dropped */
	if (!dev) {
		res = -EINVAL;
		goto err;
	}

	/* allocate TX/RX buffer-descriptors and buffers */

	res = qfec_mem_alloc(dev);
	if (res)
		goto err;

	/* initialize TX: attach each SW buf-desc to its HW descriptor
	 * in the shared DMA region and release ownership to SW */
	p_desc = priv->bd_base;

	for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
		p_bd->p_desc = p_desc++;

		if (n == (priv->n_tbd - 1))
			qfec_bd_last_bd_set(p_bd);

		qfec_bd_own_clr(p_bd); /* clear ownership */
	}

	qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);

	/* request a TX-complete interrupt only every tx_ic_mod frames */
	priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
	if (priv->tx_ic_mod == 0)
		priv->tx_ic_mod = 1;

	/* initialize RX buffer descriptors and allocate sk_bufs */
	p_ring = &priv->ring_rbd;
	qfec_ring_init(p_ring, priv->n_rbd, 0);
	qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);

	for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
		p_bd->p_desc = p_desc++;

		if (qfec_rbd_init(dev, p_bd))
			break;
		qfec_ring_tail_adv(p_ring);
	}

	/* remember where RX processing starts and where the ring wraps */
	priv->p_latest_rbd = priv->p_rbd;
	priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;

	/* config ptp clock */
	qfec_ptp_cfg(priv);

	/* configure PHY - must be set before reset/hw_init */
	priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);
	if (priv->mii.supports_gmii) {
		QFEC_LOG_ERR("%s: RGMII\n", __func__);
		qfec_intf_sel(priv, INTFC_RGMII);
	} else {
		QFEC_LOG_ERR("%s: MII\n", __func__);
		qfec_intf_sel(priv, INTFC_MII);
	}

	/* initialize controller after BDs allocated */
	res = qfec_hw_init(priv);
	if (res)
		goto err1;

	/* get/set (primary) MAC address */
	qfec_set_adr_regs(priv, dev->dev_addr);
	qfec_set_rx_mode(dev);

	/* start phy monitor: polls link state once per second */
	QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
	netif_carrier_off(priv->net_dev);
	setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
	mod_timer(&priv->phy_tmr, jiffies + HZ);

	/* initialize interrupts */
	QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
	res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
	if (res)
		goto err1;

	/* enable controller */
	qfec_hw_enable(priv);
	netif_start_queue(dev);

	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
		mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));

	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
	return 0;

err1:
	qfec_mem_dealloc(dev);
err:
	QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
	return res;
}
1924
1925/*
1926 * stop() - "reverse operations performed at open time"
1927 */
static int qfec_stop(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct buf_desc *p_bd;
	struct sk_buff *skb;
	int n;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	/* stop the link monitor, HW, queue and IRQ before touching BDs */
	del_timer_sync(&priv->phy_tmr);

	qfec_hw_disable(priv);
	qfec_queue_stop(dev);
	free_irq(dev->irq, dev);

	/* free all pending sk_bufs
	 * NOTE(review): skbs are freed without a matching
	 * dma_unmap_single() here - confirm the mappings are torn
	 * down elsewhere or are benign at this point */
	for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
		skb = qfec_bd_skbuf_get(p_bd);
		if (skb)
			dev_kfree_skb(skb);
	}

	for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
		skb = qfec_bd_skbuf_get(p_bd);
		if (skb)
			dev_kfree_skb(skb);
	}

	qfec_mem_dealloc(dev);

	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);

	return 0;
}
1962
static int qfec_set_config(struct net_device *dev, struct ifmap *map)
{
	/* SIOCSIFMAP configuration changes are not supported; the
	 * request is accepted and ignored */
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
	return 0;
}
1968
1969/*
1970 * pass data from skbuf to buf-desc
1971 */
static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd;
	uint32_t ctrl = 0;
	int ret = NETDEV_TX_OK;
	unsigned long flags;

	CNTR_INC(priv, xmit);

	spin_lock_irqsave(&priv->xmit_lock, flags);

	/* If there is no room, on the ring try to free some up */
	if (qfec_ring_room(p_ring) == 0)
		qfec_tx_replenish(dev);

	/* stop queuing if no resources available */
	if (qfec_ring_room(p_ring) == 0) {
		qfec_queue_stop(dev);
		CNTR_INC(priv, tx_no_resource);

		ret = NETDEV_TX_BUSY;
		goto done;
	}

	/* locate and save *sk_buff */
	p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
	qfec_bd_skbuf_set(p_bd, skb);

	/* set DMA ptr to sk_buff data and write cache to memory */
	qfec_bd_pbuf_set(p_bd, (void *)
	dma_map_single(&dev->dev,
		(void *)skb->data, skb->len, DMA_TO_DEVICE));

	/* interrupt on completion only every tx_ic_mod-th descriptor
	 * to reduce TX interrupt load */
	ctrl = skb->len;
	if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
		ctrl |= BUF_TX_IC; /* interrupt on complete */

	/* check if timestamping enabled and requested */
	if (priv->state & timestamping) {
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
			CNTR_INC(priv, ts_tx_en);
			ctrl |= BUF_TX_IC; /* interrupt on complete */
			ctrl |= BUF_TX_TTSE; /* enable timestamp */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
	}

	/* NOTE(review): BUF_RX_RER (end-of-ring) is applied to a TX
	 * descriptor here - presumably the bit position is shared
	 * between RX and TX formats; confirm against qfec.h */
	if (qfec_bd_last_bd(p_bd))
		ctrl |= BUF_RX_RER;

	/* no gather, no multi buf frames */
	ctrl |= BUF_TX_FS | BUF_TX_LS; /* 1st and last segment */

	/* write ctl first, then hand ownership to the DMA engine */
	qfec_bd_ctl_wr(p_bd, ctrl);
	qfec_bd_status_set(p_bd, BUF_OWN);

	qfec_ring_head_adv(p_ring);
	qfec_reg_write(priv, TX_POLL_DEM_REG, 1); /* poll */

done:
	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	return ret;
}
2038
static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qfec_priv *priv = netdev_priv(dev);
	/* NOTE(review): the ifreq itself is reinterpreted as a
	 * hwtstamp_config; SIOCSHWTSTAMP normally carries a user-space
	 * pointer in ifr->ifr_data that must be transferred with
	 * copy_from_user()/copy_to_user() - confirm against the callers
	 * of this ndo_do_ioctl */
	struct hwtstamp_config *cfg = (struct hwtstamp_config *) ifr;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	if (cmd == SIOCSHWTSTAMP) {
		CNTR_INC(priv, ts_ioctl);
		QFEC_LOG(QFEC_LOG_DBG,
			"%s: SIOCSHWTSTAMP - %x flags %x tx %x rx\n",
			__func__, cfg->flags, cfg->tx_type, cfg->rx_filter);

		/* report full timestamping regardless of the request */
		cfg->flags = 0;
		cfg->tx_type = HWTSTAMP_TX_ON;
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;

		/* enable timestamping of all frames in HW */
		priv->state |= timestamping;
		qfec_reg_write(priv, TS_CTL_REG,
			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);

		return 0;
	}

	/* everything else is handled as a generic MII ioctl */
	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
}
2065
2066static struct net_device_stats *qfec_get_stats(struct net_device *dev)
2067{
2068 struct qfec_priv *priv = netdev_priv(dev);
2069
2070 QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
2071
Rohit Vaswani0565a2d2011-09-15 12:53:07 -07002072 priv->stats.multicast = qfec_reg_read(priv, NUM_MULTCST_FRM_RCVD_G);
2073
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002074 return &priv->stats;
2075}
2076
2077/*
2078 * accept new mac address
2079 */
2080static int qfec_set_mac_address(struct net_device *dev, void *p)
2081{
2082 struct qfec_priv *priv = netdev_priv(dev);
2083 struct sockaddr *addr = p;
2084
2085 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2086
2087 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2088
2089 qfec_set_adr_regs(priv, dev->dev_addr);
2090
2091 return 0;
2092}
2093
2094/*
2095 * read discontinuous MAC address from corrected fuse memory region
2096 */
2097
2098static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
2099{
2100 static int offset[] = { 0, 1, 2, 3, 4, 8 };
2101 int n;
2102
2103 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2104
2105 for (n = 0; n < nBytes; n++)
2106 buf[n] = ioread8(mac_base + offset[n]);
2107
2108 /* check that MAC programmed */
2109 if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) {
2110 QFEC_LOG_ERR("%s: null MAC address\n", __func__);
2111 return -ENODATA;
2112 }
2113
2114 return 0;
2115}
2116
2117/*
2118 * static definition of driver functions
2119 */
static const struct net_device_ops qfec_netdev_ops = {
	/* lifecycle and datapath */
	.ndo_open = qfec_open,
	.ndo_stop = qfec_stop,
	.ndo_start_xmit = qfec_xmit,

	/* control: SIOCSHWTSTAMP + MII ioctls, recovery, addressing */
	.ndo_do_ioctl = qfec_do_ioctl,
	.ndo_tx_timeout = qfec_tx_timeout,
	.ndo_set_mac_address = qfec_set_mac_address,
	.ndo_set_multicast_list = qfec_set_rx_mode,

	/* generic ethernet helpers for MTU and address validation */
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,

	.ndo_get_stats = qfec_get_stats,
	.ndo_set_config = qfec_set_config,
};
2136
2137/*
2138 * ethtool functions
2139 */
2140
2141static int qfec_nway_reset(struct net_device *dev)
2142{
2143 struct qfec_priv *priv = netdev_priv(dev);
2144 return mii_nway_restart(&priv->mii);
2145}
2146
2147/*
2148 * speed, duplex, auto-neg settings
2149 */
2150static void qfec_ethtool_getpauseparam(struct net_device *dev,
2151 struct ethtool_pauseparam *pp)
2152{
2153 struct qfec_priv *priv = netdev_priv(dev);
2154 u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
2155 u32 advert;
2156
2157 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2158
2159 /* report current settings */
2160 pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
2161 pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
2162
2163 /* report if pause is being advertised */
2164 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
2165 pp->autoneg =
2166 (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2167}
2168
2169static int qfec_ethtool_setpauseparam(struct net_device *dev,
2170 struct ethtool_pauseparam *pp)
2171{
2172 struct qfec_priv *priv = netdev_priv(dev);
2173 u32 advert;
2174
2175 QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
2176 pp->autoneg, pp->rx_pause, pp->tx_pause);
2177
2178 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
2179 advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2180
2181 /* If pause autonegotiation is enabled, but both rx and tx are not
2182 * because neither was specified in the ethtool cmd,
2183 * enable both symetrical and asymetrical pause.
2184 * otherwise, only enable the pause mode indicated by rx/tx.
2185 */
2186 if (pp->autoneg) {
2187 if (pp->rx_pause)
2188 advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
2189 else if (pp->tx_pause)
2190 advert |= ADVERTISE_PAUSE_ASYM;
2191 else
2192 advert |= ADVERTISE_PAUSE_CAP;
2193 }
2194
2195 qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);
2196
2197 return 0;
2198}
2199
2200/*
2201 * ethtool ring parameter (-g/G) support
2202 */
2203
2204/*
2205 * setringparamam - change the tx/rx ring lengths
2206 */
2207#define MIN_RING_SIZE 3
2208#define MAX_RING_SIZE 1000
2209static int qfec_ethtool_setringparam(struct net_device *dev,
2210 struct ethtool_ringparam *ring)
2211{
2212 struct qfec_priv *priv = netdev_priv(dev);
2213 u32 timeout = 20;
2214
2215 /* notify stack the link is down */
2216 netif_carrier_off(dev);
2217
2218 /* allow tx to complete & free skbufs on the tx ring */
2219 do {
2220 usleep_range(10000, 100000);
2221 qfec_tx_replenish(dev);
2222
2223 if (timeout-- == 0) {
2224 QFEC_LOG_ERR("%s: timeout\n", __func__);
2225 return -ETIME;
2226 }
2227 } while (!qfec_ring_empty(&priv->ring_tbd));
2228
2229
2230 qfec_stop(dev);
2231
2232 /* set tx ring size */
2233 if (ring->tx_pending < MIN_RING_SIZE)
2234 ring->tx_pending = MIN_RING_SIZE;
2235 else if (ring->tx_pending > MAX_RING_SIZE)
2236 ring->tx_pending = MAX_RING_SIZE;
2237 priv->n_tbd = ring->tx_pending;
2238
2239 /* set rx ring size */
2240 if (ring->rx_pending < MIN_RING_SIZE)
2241 ring->rx_pending = MIN_RING_SIZE;
2242 else if (ring->rx_pending > MAX_RING_SIZE)
2243 ring->rx_pending = MAX_RING_SIZE;
2244 priv->n_rbd = ring->rx_pending;
2245
2246
2247 qfec_open(dev);
2248
2249 return 0;
2250}
2251
2252/*
2253 * getringparamam - returns local values
2254 */
2255static void qfec_ethtool_getringparam(struct net_device *dev,
2256 struct ethtool_ringparam *ring)
2257{
2258 struct qfec_priv *priv = netdev_priv(dev);
2259
2260 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2261
2262 ring->rx_max_pending = MAX_RING_SIZE;
2263 ring->rx_mini_max_pending = 0;
2264 ring->rx_jumbo_max_pending = 0;
2265 ring->tx_max_pending = MAX_RING_SIZE;
2266
2267 ring->rx_pending = priv->n_rbd;
2268 ring->rx_mini_pending = 0;
2269 ring->rx_jumbo_pending = 0;
2270 ring->tx_pending = priv->n_tbd;
2271}
2272
2273/*
2274 * speed, duplex, auto-neg settings
2275 */
2276static int
2277qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2278{
2279 struct qfec_priv *priv = netdev_priv(dev);
2280
2281 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2282
2283 cmd->maxrxpkt = priv->n_rbd;
2284 cmd->maxtxpkt = priv->n_tbd;
2285
2286 return mii_ethtool_gset(&priv->mii, cmd);
2287}
2288
2289static int
2290qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2291{
2292 struct qfec_priv *priv = netdev_priv(dev);
2293
2294 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2295
2296 return mii_ethtool_sset(&priv->mii, cmd);
2297}
2298
2299/*
2300 * msg/debug level
2301 */
/* return the driver-global debug bit-mask (shared by all qfec devices) */
static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
{
	return qfec_debug;
}
2306
/* NOTE(review): non-standard semantics - the bits supplied TOGGLE the
 * current debug mask rather than replacing it (deliberate, per the
 * inline comment)
 */
static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	qfec_debug ^= level; /* toggle on/off */
}
2311
2312/*
2313 * register dump
2314 */
#define DMA_DMP_OFFSET 0x0000 /* dump-buffer offset of the DMA group */
#define DMA_REG_OFFSET 0x1000 /* controller offset of the DMA regs */
#define DMA_REG_LEN 23 /* 32-bit DMA registers dumped */

#define MAC_DMP_OFFSET 0x0080 /* dump-buffer offset of the MAC group */
#define MAC_REG_OFFSET 0x0000 /* controller offset of the MAC regs */
#define MAC_REG_LEN 55 /* 32-bit MAC registers dumped */

#define TS_DMP_OFFSET 0x0180 /* dump-buffer offset of timestamp group */
#define TS_REG_OFFSET 0x0700 /* controller offset of timestamp regs */
#define TS_REG_LEN 15 /* 32-bit timestamp registers dumped */

#define MDIO_DMP_OFFSET 0x0200 /* MDIO regs dumped as 16-bit values */
#define MDIO_REG_LEN 16

/* total dump size: start of the MDIO section plus 16 2-byte values */
#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
2331
/* number of bytes qfec_ethtool_getregs() writes into its buffer */
static int qfec_ethtool_getregs_len(struct net_device *dev)
{
	return REG_SIZE;
}
2336
/*
 * fill 'buf' (REG_SIZE bytes) with a register snapshot for ethtool -d:
 * DMA, MAC and timestamp registers stored as big-endian 32-bit words at
 * the *_DMP_OFFSETs defined above, followed by MDIO_REG_LEN big-endian
 * 16-bit MDIO register values; gaps between sections are left zeroed
 */
static void
qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
	void *buf)
{
	struct qfec_priv *priv = netdev_priv(dev);
	u32 *data = buf;
	u16 *data16;
	unsigned int i;
	unsigned int j;
	unsigned int n;

	/* pre-zero so unwritten gaps in the layout read as 0 */
	memset(buf, 0, REG_SIZE);

	/* DMA registers: controller offset 0x1000 -> buffer offset 0 */
	j = DMA_DMP_OFFSET / sizeof(u32);
	for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* MAC registers: controller offset 0 -> buffer offset 0x80 */
	j = MAC_DMP_OFFSET / sizeof(u32);
	for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* timestamp registers: controller offset 0x700 -> buffer 0x180 */
	j = TS_DMP_OFFSET / sizeof(u32);
	for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* MDIO registers as 16-bit values
	 * NOTE(review): phy address is hard-coded to 0 rather than
	 * priv->phy_id - presumably intentional, but worth confirming
	 */
	data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
	for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
		data16[n++] = htons(qfec_mdio_read(dev, 0, i));

	regs->len = REG_SIZE;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
}
2370
2371/*
2372 * statistics
2373 * return counts of various ethernet activity.
2374 * many of these are same as in struct net_device_stats
2375 *
2376 * missed-frames indicates the number of attempts made by the ethernet
2377 * controller to write to a buffer-descriptor when the BD ownership
2378 * bit was not set. The rxfifooverflow counter (0x1D4) is not
2379 * available. The Missed Frame and Buffer Overflow Counter register
2380 * (0x1020) is used, but has only 16-bits and is reset when read.
2381 * It is read and updates the value in priv->stats.rx_missed_errors
2382 * in qfec_rx_int().
2383 */
/* labels for 'ethtool -S'; order must match qfec_stats_regs below,
 * plus one extra trailing entry ("RX Missed Frames") that is reported
 * from priv->stats.rx_missed_errors rather than a hardware counter.
 * "TX Pause Frames" and "RX Pause Frames" each appear twice because
 * the corresponding register indices (92, 116) are duplicated in
 * qfec_stats_regs - NOTE(review): apparently intentional; verify
 */
static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
	"TX good/bad Bytes ",
	"TX Bytes ",
	"TX good/bad Frames ",
	"TX Bcast Frames ",
	"TX Mcast Frames ",
	"TX Unicast Frames ",
	"TX Pause Frames ",
	"TX Vlan Frames ",
	"TX Frames 64 ",
	"TX Frames 65-127 ",
	"TX Frames 128-255 ",
	"TX Frames 256-511 ",
	"TX Frames 512-1023 ",
	"TX Frames 1024+ ",
	"TX Pause Frames ",
	"TX Collisions ",
	"TX Late Collisions ",
	"TX Excessive Collisions ",

	"RX good/bad Bytes ",
	"RX Bytes ",
	"RX good/bad Frames ",
	"RX Bcast Frames ",
	"RX Mcast Frames ",
	"RX Unicast Frames ",
	"RX Pause Frames ",
	"RX Vlan Frames ",
	"RX Frames 64 ",
	"RX Frames 65-127 ",
	"RX Frames 128-255 ",
	"RX Frames 256-511 ",
	"RX Frames 512-1023 ",
	"RX Frames 1024+ ",
	"RX Pause Frames ",
	"RX Crc error Frames ",
	"RX Length error Frames ",
	"RX Alignment error Frames ",
	"RX Runt Frames ",
	"RX Oversize Frames ",
	"RX Missed Frames ",

};
2427
/* word indices (register offset / 4) of the hardware counters read for
 * each label in qfec_stats_strings; 92 and 116 (pause-frame counters)
 * are listed twice to match the duplicated labels - NOTE(review):
 * presumably intentional, confirm against the controller data sheet
 */
static u32 qfec_stats_regs[] = {

	/* TX counters */
	 69,  89,  70,  71,  72,  90,  92,  93,
	 73,  74,  75,  76,  77,  78,  92,  84,
	 86,  87,

	/* RX counters */
	 97,  98,  96,  99, 100, 113, 116, 118,
	107, 108, 109, 110, 111, 112, 116, 101,
	114, 102, 103, 106
};
2438
2439static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
2440 char *buf)
2441{
2442 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
2443 int count = PAGE_SIZE;
2444 int l = 0;
2445 int n;
2446
2447 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
2448
2449 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) {
2450 l += snprintf(&buf[l], count - l, " %12u %s\n",
2451 qfec_reg_read(priv,
2452 qfec_stats_regs[n] * sizeof(uint32_t)),
2453 qfec_stats_strings[n]);
2454 }
2455
2456 return l;
2457}
2458
2459static int qfec_get_sset_count(struct net_device *dev, int sset)
2460{
2461 switch (sset) {
2462 case ETH_SS_STATS:
2463 return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */
2464
2465 default:
2466 return -EOPNOTSUPP;
2467 }
2468}
2469
2470static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
2471 u8 *buf)
2472{
2473 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__,
2474 sizeof(qfec_stats_strings));
2475
2476 memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
2477}
2478
2479static void qfec_ethtool_getstats(struct net_device *dev,
2480 struct ethtool_stats *stats, uint64_t *data)
2481{
2482 struct qfec_priv *priv = netdev_priv(dev);
2483 int j = 0;
2484 int n;
2485
2486 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
2487 data[j++] = qfec_reg_read(priv,
2488 qfec_stats_regs[n] * sizeof(uint32_t));
2489
2490 data[j++] = priv->stats.rx_missed_errors;
2491
2492 stats->n_stats = j;
2493}
2494
2495static void qfec_ethtool_getdrvinfo(struct net_device *dev,
2496 struct ethtool_drvinfo *info)
2497{
2498 strlcpy(info->driver, QFEC_NAME, sizeof(info->driver));
2499 strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
2500 strlcpy(info->bus_info, dev_name(dev->dev.parent),
2501 sizeof(info->bus_info));
2502
2503 info->eedump_len = 0;
2504 info->regdump_len = qfec_ethtool_getregs_len(dev);
2505}
2506
2507/*
2508 * ethtool ops table
2509 */
static const struct ethtool_ops qfec_ethtool_ops = {
	.nway_reset = qfec_nway_reset,

	/* link settings via the generic MII layer */
	.get_settings = qfec_ethtool_getsettings,
	.set_settings = qfec_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = qfec_ethtool_getdrvinfo,
	.get_msglevel = qfec_ethtool_getmsglevel,
	.set_msglevel = qfec_ethtool_setmsglevel,
	/* register dump (ethtool -d) */
	.get_regs_len = qfec_ethtool_getregs_len,
	.get_regs = qfec_ethtool_getregs,

	/* ring lengths (ethtool -g/-G) */
	.get_ringparam = qfec_ethtool_getringparam,
	.set_ringparam = qfec_ethtool_setringparam,

	/* flow control (ethtool -a/-A) */
	.get_pauseparam = qfec_ethtool_getpauseparam,
	.set_pauseparam = qfec_ethtool_setpauseparam,

	/* statistics (ethtool -S) */
	.get_sset_count = qfec_get_sset_count,
	.get_strings = qfec_ethtool_getstrings,
	.get_ethtool_stats = qfec_ethtool_getstats,
};
2532
2533/*
2534 * create sysfs entries
2535 */
2536static DEVICE_ATTR(bd_tx, 0444, qfec_bd_tx_show, NULL);
2537static DEVICE_ATTR(bd_rx, 0444, qfec_bd_rx_show, NULL);
2538static DEVICE_ATTR(cfg, 0444, qfec_config_show, NULL);
2539static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002540static DEVICE_ATTR(cmd, 0222, NULL, qfec_cmd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002541static DEVICE_ATTR(cntrs, 0444, qfec_cntrs_show, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002542static DEVICE_ATTR(reg, 0444, qfec_reg_show, NULL);
Rohit Vaswani73299b42011-12-16 13:38:02 -08002543static DEVICE_ATTR(mdio, 0444, qfec_mdio_show, NULL);
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002544static DEVICE_ATTR(stats, 0444, qfec_stats_show, NULL);
2545static DEVICE_ATTR(tstamp, 0444, qfec_tstamp_show, NULL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002546
2547static void qfec_sysfs_create(struct net_device *dev)
2548{
2549 if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
2550 device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
2551 device_create_file(&(dev->dev), &dev_attr_cfg) ||
2552 device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002553 device_create_file(&(dev->dev), &dev_attr_cmd) ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002554 device_create_file(&(dev->dev), &dev_attr_cntrs) ||
Rohit Vaswani73299b42011-12-16 13:38:02 -08002555 device_create_file(&(dev->dev), &dev_attr_mdio) ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002556 device_create_file(&(dev->dev), &dev_attr_reg) ||
Rohit Vaswani9e03c7f2011-07-13 14:26:21 -07002557 device_create_file(&(dev->dev), &dev_attr_stats) ||
2558 device_create_file(&(dev->dev), &dev_attr_tstamp))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002559 pr_err("qfec_sysfs_create failed to create sysfs files\n");
2560}
2561
2562/*
2563 * map a specified resource
2564 */
2565static int qfec_map_resource(struct platform_device *plat, int resource,
2566 struct resource **priv_res,
2567 void **addr)
2568{
2569 struct resource *res;
2570
2571 QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
2572
2573 /* allocate region to access controller registers */
2574 *priv_res = res = platform_get_resource(plat, resource, 0);
2575 if (!res) {
2576 QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
2577 return -ENODEV;
2578 }
2579
2580 res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
2581 if (!res) {
2582 QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
2583 __func__, res->start, res->end - res->start);
2584 return -EBUSY;
2585 }
2586
2587 *addr = ioremap(res->start, res->end - res->start);
2588 if (!*addr)
2589 return -ENOMEM;
2590
2591 QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
2592 __func__, (void *)res->start, *addr);
2593
2594 return 0;
2595};
2596
2597/*
2598 * free allocated io regions
2599 */
2600static void qfec_free_res(struct resource *res, void *base)
2601{
2602
2603 if (res) {
2604 if (base)
2605 iounmap((void __iomem *)base);
2606
2607 release_mem_region(res->start, res->end - res->start);
2608 }
2609};
2610
2611/*
2612 * probe function that obtain configuration info and allocate net_device
2613 */
2614static int __devinit qfec_probe(struct platform_device *plat)
2615{
2616 struct net_device *dev;
2617 struct qfec_priv *priv;
2618 int ret = 0;
2619
2620 /* allocate device */
2621 dev = alloc_etherdev(sizeof(struct qfec_priv));
2622 if (!dev) {
2623 QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
2624 ret = -ENOMEM;
2625 goto err;
2626 }
2627
2628 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n", __func__, (int)dev);
2629
2630 qfec_dev = dev;
2631 SET_NETDEV_DEV(dev, &plat->dev);
2632
2633 dev->netdev_ops = &qfec_netdev_ops;
2634 dev->ethtool_ops = &qfec_ethtool_ops;
2635 dev->watchdog_timeo = 2 * HZ;
2636 dev->irq = platform_get_irq(plat, 0);
2637
2638 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2639
2640 /* initialize private data */
2641 priv = (struct qfec_priv *)netdev_priv(dev);
2642 memset((void *)priv, 0, sizeof(priv));
2643
2644 priv->net_dev = dev;
2645 platform_set_drvdata(plat, dev);
2646
2647 priv->n_tbd = TX_BD_NUM;
2648 priv->n_rbd = RX_BD_NUM;
2649
2650 /* initialize phy structure */
2651 priv->mii.phy_id_mask = 0x1F;
2652 priv->mii.reg_num_mask = 0x1F;
2653 priv->mii.dev = dev;
2654 priv->mii.mdio_read = qfec_mdio_read;
2655 priv->mii.mdio_write = qfec_mdio_write;
2656
2657 /* map register regions */
2658 ret = qfec_map_resource(
2659 plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
2660 if (ret) {
2661 QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
2662 goto err1;
2663 }
2664
2665 ret = qfec_map_resource(
2666 plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
2667 if (ret) {
2668 QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
2669 goto err2;
2670 }
2671
2672 ret = qfec_map_resource(
2673 plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
2674 if (ret) {
2675 QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
2676 goto err3;
2677 }
2678
2679 /* initialize MAC addr */
2680 ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
2681 MAC_ADDR_SIZE);
2682 if (ret)
2683 goto err4;
2684
2685 QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
2686 __func__,
2687 dev->dev_addr[0], dev->dev_addr[1],
2688 dev->dev_addr[2], dev->dev_addr[3],
2689 dev->dev_addr[4], dev->dev_addr[5]);
2690
2691 ret = register_netdev(dev);
2692 if (ret) {
2693 QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
2694 goto err4;
2695 }
2696
2697 spin_lock_init(&priv->mdio_lock);
2698 spin_lock_init(&priv->xmit_lock);
2699 qfec_sysfs_create(dev);
2700
2701 return 0;
2702
2703 /* error handling */
2704err4:
2705 qfec_free_res(priv->fuse_res, priv->fuse_base);
2706err3:
2707 qfec_free_res(priv->clk_res, priv->clk_base);
2708err2:
2709 qfec_free_res(priv->mac_res, priv->mac_base);
2710err1:
2711 free_netdev(dev);
2712err:
2713 QFEC_LOG_ERR("%s: err\n", __func__);
2714 return ret;
2715}
2716
2717/*
2718 * module remove
2719 */
2720static int __devexit qfec_remove(struct platform_device *plat)
2721{
2722 struct net_device *dev = platform_get_drvdata(plat);
2723 struct qfec_priv *priv = netdev_priv(dev);
2724
2725 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2726
2727 platform_set_drvdata(plat, NULL);
2728
2729 qfec_free_res(priv->fuse_res, priv->fuse_base);
2730 qfec_free_res(priv->clk_res, priv->clk_base);
2731 qfec_free_res(priv->mac_res, priv->mac_base);
2732
2733 unregister_netdev(dev);
2734 free_netdev(dev);
2735
2736 return 0;
2737}
2738
/*
 * module support
 * the FSM9xxx is not a mobile device and does not support power management
 */
2743
/* platform-bus glue; .name must match the platform device name */
static struct platform_driver qfec_driver = {
	.probe = qfec_probe,
	.remove = __devexit_p(qfec_remove),
	.driver = {
		.name = QFEC_NAME,
		.owner = THIS_MODULE,
	},
};
2752
2753/*
2754 * module init
2755 */
2756static int __init qfec_init_module(void)
2757{
2758 int res;
2759
2760 QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
2761
2762 res = platform_driver_register(&qfec_driver);
2763
2764 QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
2765 __func__, res);
2766
2767 return res;
2768}
2769
2770/*
2771 * module exit
2772 */
/* unregister the driver; the core calls qfec_remove() for bound devices */
static void __exit qfec_exit_module(void)
{
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	platform_driver_unregister(&qfec_driver);
}
2779
/* module metadata, visible via modinfo */
MODULE_DESCRIPTION("FSM Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
MODULE_VERSION("1.0");

module_init(qfec_init_module);
module_exit(qfec_exit_module);