blob: 90e8efffa77de143986d530582c1d43c539221f9 [file] [log] [blame]
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
#include <linux/io.h>

#include <linux/platform_device.h>

#include <linux/types.h>        /* size_t */
#include <linux/interrupt.h>    /* mark_bh */

#include <linux/netdevice.h>   /* struct device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/dma-mapping.h> /* dma_map_single, dma_mapping_error */

#include <linux/proc_fs.h>
#include <linux/timer.h>
#include <linux/mii.h>

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/inet.h>

#include "qfec.h"
34
35#define QFEC_NAME "qfec"
36#define QFEC_DRV_VER "June 18a 2011"
37
38#define ETH_BUF_SIZE 0x600
39#define MAX_N_BD 50
40#define MAC_ADDR_SIZE 6
41
42#define RX_TX_BD_RATIO 8
43#define RX_BD_NUM 32
44#define TX_BD_NUM (RX_BD_NUM * RX_TX_BD_RATIO)
45#define TX_BD_TI_RATIO 4
46
47/*
48 * logging macros
49 */
50#define QFEC_LOG_PR 1
51#define QFEC_LOG_DBG 2
52#define QFEC_LOG_DBG2 4
53#define QFEC_LOG_MDIO_W 8
54#define QFEC_LOG_MDIO_R 16
55
56static int qfec_debug = QFEC_LOG_PR;
57
58#ifdef QFEC_DEBUG
59# define QFEC_LOG(flag, ...) \
60 do { \
61 if (flag & qfec_debug) \
62 pr_info(__VA_ARGS__); \
63 } while (0)
64#else
65# define QFEC_LOG(flag, ...)
66#endif
67
68#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
69
70/*
71 * driver buffer-descriptor
72 * contains the 4 word HW descriptor plus an additional 4-words.
73 * (See the DSL bits in the BUS-Mode register).
74 */
75#define BD_FLAG_LAST_BD 1
76
/*
 * driver-side wrapper around one hardware descriptor
 *
 * p_desc points at the 4-word HW descriptor in the coherent DMA region;
 * the remaining fields are driver bookkeeping (see the DSL note above).
 */
struct buf_desc {
	struct qfec_buf_desc   *p_desc;		/* HW descriptor (DMA mem) */
	struct sk_buff         *skb;		/* skb backing this BD */
	void                   *buf_virt_addr;	/* CPU address of buffer */
	void                   *buf_phys_addr;	/* DMA address of buffer */
	uint32_t                last_bd_flag;	/* non-zero on ring's last BD */
};
84
85/*
86 *inline functions accessing non-struct qfec_buf_desc elements
87 */
88
89/* skb */
90static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
91{
92 return p_bd->skb;
93};
94
95static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
96{
97 p_bd->skb = p;
98};
99
100/* virtual addr */
101static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
102{
103 p_bd->buf_virt_addr = addr;
104};
105
106static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
107{
108 return p_bd->buf_virt_addr;
109};
110
111/* physical addr */
112static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
113{
114 p_bd->buf_phys_addr = addr;
115};
116
117static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
118{
119 return p_bd->buf_phys_addr;
120};
121
122/* last_bd_flag */
123static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
124{
125 return (p_bd->last_bd_flag != 0);
126};
127
128static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
129{
130 p_bd->last_bd_flag = BD_FLAG_LAST_BD;
131};
132
133/*
134 *inline functions accessing struct qfec_buf_desc elements
135 */
136
137/* ownership bit */
138static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
139{
140 return p_bd->p_desc->status & BUF_OWN;
141};
142
143static inline void qfec_bd_own_set(struct buf_desc *p_bd)
144{
145 p_bd->p_desc->status |= BUF_OWN ;
146};
147
148static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
149{
150 p_bd->p_desc->status &= ~(BUF_OWN);
151};
152
153static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
154{
155 return p_bd->p_desc->status;
156};
157
158static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
159{
160 p_bd->p_desc->status = status;
161};
162
163static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
164{
165 return BUF_RX_FL_GET((*p_bd->p_desc));
166};
167
168/* control register */
169static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
170{
171 p_bd->p_desc->ctl = 0;
172};
173
174static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
175{
176 return p_bd->p_desc->ctl;
177};
178
179static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
180{
181 p_bd->p_desc->ctl |= val;
182};
183
184static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
185{
186 p_bd->p_desc->ctl = val;
187};
188
189/* pbuf register */
190static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
191{
192 return p_bd->p_desc->p_buf;
193}
194
195static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
196{
197 p_bd->p_desc->p_buf = p;
198}
199
200/* next register */
201static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
202{
203 return p_bd->p_desc->next;
204};
205
206/*
207 * initialize an RX BD w/ a new buf
208 */
209static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
210{
211 struct sk_buff *skb;
212 void *p;
213 void *v;
214
215 /* allocate and record ptrs for sk buff */
216 skb = dev_alloc_skb(ETH_BUF_SIZE);
217 if (!skb)
218 goto err;
219
220 qfec_bd_skbuf_set(p_bd, skb);
221
222 v = skb_put(skb, ETH_BUF_SIZE);
223 qfec_bd_virt_set(p_bd, v);
224
225 p = (void *) dma_map_single(&dev->dev,
226 (void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
227 qfec_bd_pbuf_set(p_bd, p);
228 qfec_bd_phys_set(p_bd, p);
229
230 /* populate control register */
231 /* mark the last BD and set end-of-ring bit */
232 qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
233 (qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));
234
235 qfec_bd_status_set(p_bd, BUF_OWN);
236
237 if (!(qfec_debug & QFEC_LOG_DBG2))
238 return 0;
239
240 /* debug messages */
241 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);
242
243 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);
244
245 QFEC_LOG(QFEC_LOG_DBG2,
246 "%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
247 __func__, (void *)p_bd,
248 (void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
249 (void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
250 (void *)p);
251
252 return 0;
253
254err:
255 return -ENOMEM;
256};
257
258/*
259 * ring structure used to maintain indices of buffer-descriptor (BD) usage
260 *
261 * The RX BDs are normally all pre-allocated with buffers available to be
262 * DMA'd into with received frames. The head indicates the first BD/buffer
263 * containing a received frame, and the tail indicates the oldest BD/buffer
264 * that needs to be restored for use. Head and tail are both initialized
265 * to zero, and n_free is initialized to zero, since all BD are initialized.
266 *
267 * The TX BDs are normally available for use, only being initialized as
268 * TX frames are requested for transmission. The head indicates the
269 * first available BD, and the tail indicate the oldest BD that has
270 * not been acknowledged as transmitted. Head and tail are both initialized
271 * to zero, and n_free is initialized to len, since all are available for use.
272 */
struct ring {
	int	head;	/* index of next BD to use (TX) / first rx'd frame (RX) */
	int	tail;	/* index of oldest in-flight / to-be-restored BD */
	int	n_free;	/* BDs currently available */
	int	len;	/* total BDs in the ring */
};
279
280/* accessory in line functions for struct ring */
281static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
282{
283 p_ring->head = p_ring->tail = 0;
284 p_ring->len = size;
285 p_ring->n_free = free;
286}
287
288static inline int qfec_ring_full(struct ring *p_ring)
289{
290 return (p_ring->n_free == 0);
291};
292
293static inline int qfec_ring_empty(struct ring *p_ring)
294{
295 return (p_ring->n_free == p_ring->len);
296}
297
298static inline void qfec_ring_head_adv(struct ring *p_ring)
299{
300 p_ring->head = ++p_ring->head % p_ring->len;
301 p_ring->n_free--;
302};
303
304static inline void qfec_ring_tail_adv(struct ring *p_ring)
305{
306 p_ring->tail = ++p_ring->tail % p_ring->len;
307 p_ring->n_free++;
308};
309
310static inline int qfec_ring_head(struct ring *p_ring)
311{
312
313 return p_ring->head;
314};
315
316static inline int qfec_ring_tail(struct ring *p_ring)
317{
318 return p_ring->tail;
319};
320
321static inline int qfec_ring_room(struct ring *p_ring)
322{
323 return p_ring->n_free;
324};
325
326/*
327 * counters track normal and abnormal driver events and activity
328 */
/*
 * indices into qfec_priv.cntr[]
 *
 * Order and count MUST stay in sync with cntr_name[] below.  The
 * "half" marker splits the list into the two columns printed by
 * qfec_cntrs_show() ((cntr_last + 1) / 2 entries per column).
 */
enum cntr {
	isr = 0,
	fatal_bus,

	early_tx,
	tx_no_resource,
	tx_proc_stopped,
	tx_jabber_tmout,

	xmit,
	tx_int,
	tx_isr,
	tx_owned,
	tx_underflow,

	tx_replenish,
	tx_skb_null,
	tx_timeout,
	tx_too_large,

	gmac_isr,

	/* half */
	norm_int,
	abnorm_int,

	early_rx,
	rx_buf_unavail,
	rx_proc_stopped,
	rx_watchdog,

	netif_rx_cntr,
	rx_int,
	rx_isr,
	rx_owned,
	rx_overflow,

	rx_dropped,
	rx_skb_null,
	queue_start,
	queue_stop,

	rx_paddr_nok,
	ts_ioctl,
	ts_tx_en,
	ts_tx_rtn,

	ts_rec,
	cntr_last,	/* sentinel: number of counters */
};
379
/*
 * printable names for enum cntr, in the same order
 *
 * NOTE: netif_rx_cntr prints as "netif_rx"; the trailing "" pads the
 * list so qfec_cntrs_show() may read one entry past the midpoint when
 * cntr_last is odd.
 */
static char *cntr_name[] = {
	"isr",
	"fatal_bus",

	"early_tx",
	"tx_no_resource",
	"tx_proc_stopped",
	"tx_jabber_tmout",

	"xmit",
	"tx_int",
	"tx_isr",
	"tx_owned",
	"tx_underflow",

	"tx_replenish",
	"tx_skb_null",
	"tx_timeout",
	"tx_too_large",

	"gmac_isr",

	/* half */
	"norm_int",
	"abnorm_int",

	"early_rx",
	"rx_buf_unavail",
	"rx_proc_stopped",
	"rx_watchdog",

	"netif_rx",
	"rx_int",
	"rx_isr",
	"rx_owned",
	"rx_overflow",

	"rx_dropped",
	"rx_skb_null",
	"queue_start",
	"queue_stop",

	"rx_paddr_nok",
	"ts_ioctl",
	"ts_tx_en",
	"ts_tx_rtn",

	"ts_rec",
	""
};
430
431/*
432 * private data
433 */
434
435static struct net_device *qfec_dev;
436
/* driver state flags held in qfec_priv.state */
enum qfec_state {
	timestamping = 0x04,	/* HW timestamping enabled */
};
440
/*
 * per-device private state, reached via netdev_priv()
 */
struct qfec_priv {
	struct net_device      *net_dev;
	struct net_device_stats stats;            /* req statistics */

	struct device           dev;

	spinlock_t              xmit_lock;        /* serializes TX path */
	spinlock_t              mdio_lock;        /* serializes MDIO ops */

	unsigned int            state;            /* driver state */

	unsigned int            bd_size;          /* buf-desc alloc size */
	struct qfec_buf_desc   *bd_base;          /* * qfec-buf-desc */
	dma_addr_t              tbd_dma;          /* dma/phy-addr buf-desc */
	dma_addr_t              rbd_dma;          /* dma/phy-addr buf-desc */

	struct resource        *mac_res;
	void                   *mac_base;         /* mac (virt) base address */

	struct resource        *clk_res;
	void                   *clk_base;         /* clk (virt) base address */

	struct resource        *fuse_res;
	void                   *fuse_base;        /* mac addr fuses */

	unsigned int            n_tbd;            /* # of TX buf-desc */
	struct ring             ring_tbd;         /* TX ring */
	struct buf_desc        *p_tbd;            /* TX BD wrapper array */
	unsigned int            tx_ic_mod;        /* (%) val for setting IC */

	unsigned int            n_rbd;            /* # of RX buf-desc */
	struct ring             ring_rbd;         /* RX ring */
	struct buf_desc        *p_rbd;            /* RX BD wrapper array */

	struct buf_desc        *p_latest_rbd;     /* most recently filled RX BD */
	struct buf_desc        *p_ending_rbd;     /* RX BD at end of ring */

	unsigned long           cntr[cntr_last];  /* activity counters */

	struct mii_if_info      mii;              /* used by mii lib */

	int                     mdio_clk;         /* phy mdio clock rate */
	int                     phy_id;           /* default PHY addr (0) */
	struct timer_list       phy_tmr;          /* monitor PHY state */
};
486
487/*
488 * cntrs display
489 */
490
491static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
492 char *buf)
493{
494 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
495 int h = (cntr_last + 1) / 2;
496 int l;
497 int n;
498 int count = PAGE_SIZE;
499
500 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
501
502 l = snprintf(&buf[0], count, "%s:\n", __func__);
503 for (n = 0; n < h; n++) {
504 l += snprintf(&buf[l], count - l,
505 " %12lu %-16s %12lu %s\n",
506 priv->cntr[n], cntr_name[n],
507 priv->cntr[n+h], cntr_name[n+h]);
508 }
509
510 return l;
511}
512
/* bump one of the qfec_priv.cntr[] activity counters (not atomic) */
# define CNTR_INC(priv, name) (priv->cntr[name]++)
514
515/*
516 * functions that manage state
517 */
518static inline void qfec_queue_start(struct net_device *dev)
519{
520 struct qfec_priv *priv = netdev_priv(dev);
521
522 if (netif_queue_stopped(dev)) {
523 netif_wake_queue(dev);
524 CNTR_INC(priv, queue_start);
525 }
526};
527
528static inline void qfec_queue_stop(struct net_device *dev)
529{
530 struct qfec_priv *priv = netdev_priv(dev);
531
532 netif_stop_queue(dev);
533 CNTR_INC(priv, queue_stop);
534};
535
536/*
537 * functions to access and initialize the MAC registers
538 */
539static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
540{
541 return ioread32((void *) (priv->mac_base + reg));
542}
543
544static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
545{
546 uint32_t addr = (uint32_t)priv->mac_base + reg;
547
548 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
549 iowrite32(val, (void *)addr);
550}
551
552/*
553 * speed/duplex/pause settings
554 */
555static int qfec_config_show(struct device *dev, struct device_attribute *attr,
556 char *buf)
557{
558 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
559 int cfg = qfec_reg_read(priv, MAC_CONFIG_REG);
560 int flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
561 int l = 0;
562 int count = PAGE_SIZE;
563
564 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
565
566 l += snprintf(&buf[l], count, "%s:", __func__);
567
568 l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg,
569 (cfg & MAC_CONFIG_REG_PS)
570 ? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
571 cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
572 cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");
573
574 flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
575 l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow,
576 (flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
577 : ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
578 : ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));
579
580 l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
581 l += snprintf(&buf[l], count - l, "\n");
582 return l;
583}
584
585
586/*
587 * table and functions to initialize controller registers
588 */
589
/* one row of the register-init/dump table below */
struct reg_entry {
	unsigned int	rdonly;	/* 1: dump-only, never written at init */
	unsigned int	addr;	/* register offset from mac_base */
	char		*label;	/* printable name for sysfs dump */
	unsigned int	val;	/* init value (ignored when rdonly) */
};
596
/*
 * register init values and dump list, applied in order by
 * qfec_reg_init() and printed by qfec_reg_show()
 */
static struct reg_entry qfec_reg_tbl[] = {
	{ 0, BUS_MODE_REG,           "BUS_MODE_REG",     BUS_MODE_REG_DEFAULT },
	{ 0, AXI_BUS_MODE_REG,       "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
	{ 0, AXI_STATUS_REG,         "AXI_STATUS_REG",   0 },

	/* placeholder MAC address, replaced once the real one is known */
	{ 0, MAC_ADR_0_HIGH_REG,     "MAC_ADR_0_HIGH_REG", 0x00000302 },
	{ 0, MAC_ADR_0_LOW_REG,      "MAC_ADR_0_LOW_REG",  0x01350702 },

	{ 1, RX_DES_LST_ADR_REG,     "RX_DES_LST_ADR_REG", 0 },
	{ 1, TX_DES_LST_ADR_REG,     "TX_DES_LST_ADR_REG", 0 },
	{ 1, STATUS_REG,             "STATUS_REG",         0 },
	{ 1, DEBUG_REG,              "DEBUG_REG",          0 },

	{ 0, INTRP_EN_REG,           "INTRP_EN_REG",       QFEC_INTRP_SETUP},

	{ 1, CUR_HOST_TX_DES_REG,    "CUR_HOST_TX_DES_REG",    0 },
	{ 1, CUR_HOST_RX_DES_REG,    "CUR_HOST_RX_DES_REG",    0 },
	{ 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
	{ 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },

	{ 1, MAC_FR_FILTER_REG,      "MAC_FR_FILTER_REG",      0 },

	{ 0, MAC_CONFIG_REG,         "MAC_CONFIG_REG",   MAC_CONFIG_REG_SPD_1G
						| MAC_CONFIG_REG_DM
						| MAC_CONFIG_REG_TE
						| MAC_CONFIG_REG_RE
						| MAC_CONFIG_REG_IPC },

	{ 1, INTRP_STATUS_REG,       "INTRP_STATUS_REG",  0 },
	{ 1, INTRP_MASK_REG,         "INTRP_MASK_REG",    0 },

	{ 0, OPER_MODE_REG,          "OPER_MODE_REG",  OPER_MODE_REG_DEFAULT },

	{ 1, GMII_ADR_REG,           "GMII_ADR_REG",      0 },
	{ 1, GMII_DATA_REG,          "GMII_DATA_REG",     0 },

	/* mask all MMC interrupts */
	{ 0, MMC_INTR_MASK_RX_REG,   "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF },
	{ 0, MMC_INTR_MASK_TX_REG,   "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF },

	{ 1, TS_HIGH_REG,            "TS_HIGH_REG",       0 },
	{ 1, TS_LOW_REG,             "TS_LOW_REG",        0 },

	{ 1, TS_HI_UPDT_REG,         "TS_HI_UPDATE_REG",  0 },
	{ 1, TS_LO_UPDT_REG,         "TS_LO_UPDATE_REG",  0 },
	/* sub-second increment so TS_LOW wraps at 0x80000000 */
	{ 0, TS_SUB_SEC_INCR_REG,    "TS_SUB_SEC_INCR_REG", 86 },

	{ 0, TS_CTL_REG,             "TS_CTL_REG",        TS_CTL_TSENALL
						| TS_CTL_TSCTRLSSR
						| TS_CTL_TSINIT
						| TS_CTL_TSENA },
};
648
649static void qfec_reg_init(struct qfec_priv *priv)
650{
651 struct reg_entry *p = qfec_reg_tbl;
652 int n = ARRAY_SIZE(qfec_reg_tbl);
653
654 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
655
656 for (; n--; p++) {
657 if (!p->rdonly)
658 qfec_reg_write(priv, p->addr, p->val);
659 }
660}
661
662/*
663 * display registers thru sysfs
664 */
665static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
666 char *buf)
667{
668 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
669 struct reg_entry *p = qfec_reg_tbl;
670 int n = ARRAY_SIZE(qfec_reg_tbl);
671 int l = 0;
672 int count = PAGE_SIZE;
673
674 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
675
676 for (; n--; p++) {
677 l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n",
678 (void *)priv->mac_base + p->addr, p->addr,
679 qfec_reg_read(priv, p->addr), p->label);
680 }
681
682 return l;
683}
684
685/*
686 * set the MAC-0 address
687 */
688static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
689{
690 uint32_t h = 0;
691 uint32_t l = 0;
692
693 h = h << 8 | addr[5];
694 h = h << 8 | addr[4];
695
696 l = l << 8 | addr[3];
697 l = l << 8 | addr[2];
698 l = l << 8 | addr[1];
699 l = l << 8 | addr[0];
700
701 qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
702 qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l);
703
704 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
705}
706
707/*
708 * reset the controller
709 */
710
711#define QFEC_RESET_TIMEOUT 10000
712 /* reset should always clear but did not w/o test/delay
713 * in RgMii mode. there is no spec'd max timeout
714 */
715
/*
 * soft-reset the controller by setting BUS_MODE_SWR and polling
 * until the hardware self-clears it
 *
 * Returns 0 on success, -ETIME if SWR never clears within
 * QFEC_RESET_TIMEOUT polls.
 */
static int qfec_hw_reset(struct qfec_priv *priv)
{
	int timeout = QFEC_RESET_TIMEOUT;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);

	while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
		if (timeout-- == 0) {
			QFEC_LOG_ERR("%s: timeout\n", __func__);
			return -ETIME;
		}

		/* there were problems resetting the controller
		 * in RGMII mode when there wasn't sufficient
		 * delay between register reads
		 */
		usleep_range(100, 200);
	}

	return 0;
}
739
740/*
741 * initialize controller
742 */
/*
 * bring the controller to a known state: reset, program the register
 * table, point the DMA engine at the BD rings, and clear pending
 * interrupt status
 *
 * Returns 0 on success or the error from qfec_hw_reset().
 */
static int qfec_hw_init(struct qfec_priv *priv)
{
	int res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	res = qfec_hw_reset(priv);
	if (res)
		return res;

	qfec_reg_init(priv);

	/* config buf-desc locations */
	qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
	qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);

	/* clear interrupts
	 * NOTE(review): the mask written to STATUS_REG is built from
	 * INTRP_EN_REG_* enable bits — confirm these line up with the
	 * write-1-to-clear status bit positions
	 */
	qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
		| INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);

	return res;
}
765
766/*
767 * en/disable controller
768 */
769static void qfec_hw_enable(struct qfec_priv *priv)
770{
771 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
772
773 qfec_reg_write(priv, OPER_MODE_REG,
774 qfec_reg_read(priv, OPER_MODE_REG)
775 | OPER_MODE_REG_ST | OPER_MODE_REG_SR);
776}
777
778static void qfec_hw_disable(struct qfec_priv *priv)
779{
780 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
781
782 qfec_reg_write(priv, OPER_MODE_REG,
783 qfec_reg_read(priv, OPER_MODE_REG)
784 & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
785}
786
787/*
788 * interface selection
789 */
/* per-PHY-interface clock/mux register values (see intf_config_tbl) */
struct intf_config {
	uint32_t	intf_sel;	/* EMAC_PHY_INTF_SEL_REG value */
	uint32_t	emac_ns;	/* EMAC_NS_REG value */
	uint32_t	eth_x_en_ns;	/* ETH_X_EN_NS_REG value */
	uint32_t	clkmux_sel;	/* EMAC_CLKMUX_SEL_REG value */
};
796
797#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
798#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)
799
/* indexed by enum phy_intfc: mii, rgmii, revmii */
static struct intf_config intf_config_tbl[] = {
	{ EMAC_PHY_INTF_SEL_MII,    EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_RGMII,  EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
		CLKMUX_REVMII }
};
806
807/*
808 * emac clk register read and write functions
809 */
810static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
811{
812 return ioread32((void *) (priv->clk_base + reg));
813}
814
815static inline void qfec_clkreg_write(struct qfec_priv *priv,
816 uint32_t reg, uint32_t val)
817{
818 uint32_t addr = (uint32_t)priv->clk_base + reg;
819
820 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
821 iowrite32(val, (void *)addr);
822}
823
824/*
825 * configure the PHY interface and clock routing and signal bits
826 */
/* indices into intf_config_tbl[] */
enum phy_intfc {
	intfc_mii = 0,
	intfc_rgmii = 1,
	intfc_revmii = 2,
};
832
833static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
834{
835 struct intf_config *p;
836
837 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);
838
839 if (intfc > intfc_revmii) {
840 QFEC_LOG_ERR("%s: range\n", __func__);
841 return -ENXIO;
842 }
843
844 p = &intf_config_tbl[intfc];
845
846 qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
847 qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns);
848 qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns);
849 qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel);
850
851 return 0;
852}
853
854/*
855 * display registers thru proc-fs
856 */
/* clock-block registers dumped by qfec_clk_reg_show() */
static struct qfec_clk_reg {
	uint32_t	offset;	/* offset from clk_base */
	char		*label;	/* printable name */
} qfec_clk_regs[] = {
	{ ETH_MD_REG,              "ETH_MD_REG" },
	{ ETH_NS_REG,              "ETH_NS_REG" },
	{ ETH_X_EN_NS_REG,         "ETH_X_EN_NS_REG" },
	{ EMAC_PTP_MD_REG,         "EMAC_PTP_MD_REG" },
	{ EMAC_PTP_NS_REG,         "EMAC_PTP_NS_REG" },
	{ EMAC_NS_REG,             "EMAC_NS_REG" },
	{ EMAC_TX_FS_REG,          "EMAC_TX_FS_REG" },
	{ EMAC_RX_FS_REG,          "EMAC_RX_FS_REG" },
	{ EMAC_PHY_INTF_SEL_REG,   "EMAC_PHY_INTF_SEL_REG" },
	{ EMAC_PHY_ADDR_REG,       "EMAC_PHY_ADDR_REG" },
	{ EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" },
	{ EMAC_CLKMUX_SEL_REG,     "EMAC_CLKMUX_SEL_REG" },
};
874
875static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
876 char *buf)
877{
878 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
879 struct qfec_clk_reg *p = qfec_clk_regs;
880 int n = ARRAY_SIZE(qfec_clk_regs);
881 int l = 0;
882 int count = PAGE_SIZE;
883
884 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
885
886 for (; n--; p++) {
887 l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n",
888 (void *)priv->clk_base + p->offset, p->offset,
889 qfec_clkreg_read(priv, p->offset), p->label);
890 }
891
892 return l;
893}
894
895/*
896 * speed selection
897 */
898
/* per-speed PLL divider configuration (see qfec_pll_cfg_tbl) */
struct qfec_pll_cfg {
	uint32_t	spd;	/* MAC_CONFIG_REG speed bits */
	uint32_t	eth_md;	/* M [31:16], NOT 2*D [15:0] */
	uint32_t	eth_ns;	/* NOT(M-N) [31:16], ctl bits [11:0] */
};
904
/* indexed by enum speed: spd_10, spd_100, spd_1000 */
static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
	/*  2.5 MHz  */
	{ MAC_CONFIG_REG_SPD_10,   ETH_MD_M(1) | ETH_MD_2D_N(100),
		ETH_NS_NM(100-1)
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC },
	/*  25 MHz  */
	{ MAC_CONFIG_REG_SPD_100,  ETH_MD_M(1) | ETH_MD_2D_N(10),
		ETH_NS_NM(10-1)
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC },
	/*  125 MHz  */
	{MAC_CONFIG_REG_SPD_1G,    0,           ETH_NS_PRE_DIV(1)
		| CLK_SRC_PLL_EMAC },
};
924
/* indices into qfec_pll_cfg_tbl[] */
enum speed {
	spd_10 = 0,
	spd_100 = 1,
	spd_1000 = 2,
};
930
931/*
932 * configure the PHY interface and clock routing and signal bits
933 */
934static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
935 unsigned int dplx)
936{
937 struct qfec_priv *priv = netdev_priv(dev);
938 struct qfec_pll_cfg *p;
939
940 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);
941
942 if (spd > spd_1000) {
943 QFEC_LOG_ERR("%s: range\n", __func__);
944 return -ENODEV;
945 }
946
947 p = &qfec_pll_cfg_tbl[spd];
948
949 /* set the MAC speed bits */
950 qfec_reg_write(priv, MAC_CONFIG_REG,
951 (qfec_reg_read(priv, MAC_CONFIG_REG)
952 & ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
953 | p->spd | (dplx ? MAC_CONFIG_REG_DM : 0));
954
955 qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
956 qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);
957
958 return 0;
959}
960
961/*
962 * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
963 */
964
/* PTP clock divider config: 25 MHz from the 250 MHz EMAC PLL */
static struct qfec_pll_cfg qfec_pll_ptp = {
	/*  25 MHz  */
	0, ETH_MD_M(1) | ETH_MD_2D_N(10),    ETH_NS_NM(10-1)
		| EMAC_PTP_NS_ROOT_EN
		| EMAC_PTP_NS_CLK_EN
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC
};
975
976#define PLLTEST_PAD_CFG 0x01E0
977#define PLLTEST_PLL_7 0x3700
978
979#define CLKTEST_REG 0x01EC
980#define CLKTEST_EMAC_RX 0x3fc07f7a
981
982static int qfec_ptp_cfg(struct qfec_priv *priv)
983{
984 struct qfec_pll_cfg *p = &qfec_pll_ptp;
985
986 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
987 __func__, p->eth_md, p->eth_ns);
988
989 qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
990 qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);
991
992 /* configure HS/LS clk test ports to verify clks */
993 qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX);
994 qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);
995
996 return 0;
997}
998
999/*
1000 * MDIO operations
1001 */
1002
1003/*
1004 * wait reasonable amount of time for MDIO operation to complete, not busy
1005 */
1006static int qfec_mdio_busy(struct net_device *dev)
1007{
1008 int i;
1009
1010 for (i = 100; i > 0; i--) {
1011 if (!(qfec_reg_read(
1012 netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) {
1013 return 0;
1014 }
1015 udelay(1);
1016 }
1017
1018 return -ETIME;
1019}
1020
1021/*
1022 * initiate either a read or write MDIO operation
1023 */
1024
/*
 * initiate either a read or write MDIO operation and wait for it
 * to complete
 *
 * @dev:    net device whose MAC performs the MDIO access
 * @phy_id: PHY address on the MDIO bus
 * @reg:    PHY register number
 * @wr:     non-zero for a write (data must already be in GMII_DATA_REG)
 *
 * Returns 0 on success, -ETIME if the bus stays busy before or after
 * the operation.  Caller must hold mdio_lock.
 */
static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
{
	struct qfec_priv *priv = netdev_priv(dev);
	int res = 0;

	/* insure phy not busy */
	res = qfec_mdio_busy(dev);
	if (res) {
		QFEC_LOG_ERR("%s: busy\n", __func__);
		goto done;
	}

	/* initiate operation: setting GB (busy) starts the transaction */
	qfec_reg_write(priv, GMII_ADR_REG,
		GMII_ADR_REG_ADR_SET(phy_id)
		| GMII_ADR_REG_REG_SET(reg)
		| GMII_ADR_REG_CSR_SET(priv->mdio_clk)
		| (wr ? GMII_ADR_REG_GW : 0)
		| GMII_ADR_REG_GB);

	/* wait for operation to complete */
	res = qfec_mdio_busy(dev);
	if (res)
		QFEC_LOG_ERR("%s: timeout\n", __func__);

done:
	return res;
}
1053
1054/*
1055 * read MDIO register
1056 */
/*
 * read an MDIO register
 *
 * Returns the 16-bit register value, or a negative error code from
 * qfec_mdio_oper() on failure.  NOTE(review): callers cannot
 * distinguish a register that happens to equal a negative errno's
 * bit pattern from an error; register values are 16-bit so in
 * practice they never collide with negative errnos.
 */
static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct qfec_priv *priv = netdev_priv(dev);
	int res = 0;
	unsigned long flags;

	/* mdio_lock serializes GMII_ADR/GMII_DATA access */
	spin_lock_irqsave(&priv->mdio_lock, flags);

	res = qfec_mdio_oper(dev, phy_id, reg, 0);
	if (res) {
		QFEC_LOG_ERR("%s: oper\n", __func__);
		goto done;
	}

	res = qfec_reg_read(priv, GMII_DATA_REG);
	QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
		__func__, reg, res);

done:
	spin_unlock_irqrestore(&priv->mdio_lock, flags);
	return res;
}
1079
1080/*
1081 * write MDIO register
1082 */
1083static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
1084 int val)
1085{
1086 struct qfec_priv *priv = netdev_priv(dev);
1087 unsigned long flags;
1088
1089 spin_lock_irqsave(&priv->mdio_lock, flags);
1090
1091 QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
1092 __func__, reg, val);
1093
1094 qfec_reg_write(priv, GMII_DATA_REG, val);
1095
1096 if (qfec_mdio_oper(dev, phy_id, reg, 1))
1097 QFEC_LOG_ERR("%s: oper\n", __func__);
1098
1099 spin_unlock_irqrestore(&priv->mdio_lock, flags);
1100}
1101
1102/*
1103 * get auto-negotiation results
1104 */
1105
/* link-partner ability masks used by qfec_get_an()
 * (QFEC_100 previously OR'd LPA_100HALF twice; the third term was
 * clearly meant to be LPA_100BASE4, matching QFEC_100_FD)
 */
#define QFEC_100        (LPA_100HALF | LPA_100FULL | LPA_100BASE4)
#define QFEC_100_FD     (LPA_100FULL | LPA_100BASE4)
#define QFEC_10         (LPA_10HALF  | LPA_10FULL)
#define QFEC_10_FD       LPA_10FULL
1110
/*
 * read auto-negotiation results and derive speed, duplex and
 * pause settings
 *
 * @spd / @dplx are only written when a common 10 or 100 Mb ability is
 * found; otherwise the caller's initial values survive (qfec_phy_monitor
 * passes spd=0, dplx=1).  Flow control is resolved per 802.3 pause
 * rules and written to FLOW_CONTROL_REG unconditionally.
 */
static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
{
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t status;
	uint32_t advert;
	uint32_t lpa;
	uint32_t flow;

	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
	lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
	/* abilities common to both link partners */
	status = advert & lpa;

	/* todo: check extended status register for 1G abilities */

	if (status & QFEC_100) {
		*spd = spd_100;
		*dplx = status & QFEC_100_FD ? 1 : 0;
	}

	else if (status & QFEC_10) {
		*spd = spd_10;
		*dplx = status & QFEC_10_FD ? 1 : 0;
	}

	/* check pause */
	flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
	flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);

	if (status & ADVERTISE_PAUSE_CAP) {
		flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
	} else if (status & ADVERTISE_PAUSE_ASYM) {
		/* asymmetric: direction depends on which side advertised
		 * symmetric pause capability
		 */
		if (lpa & ADVERTISE_PAUSE_CAP)
			flow |= FLOW_CONTROL_TFE;
		else if (advert & ADVERTISE_PAUSE_CAP)
			flow |= FLOW_CONTROL_RFE;
	}

	qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
}
1150
1151/*
1152 * monitor phy status, and process auto-neg results when changed
1153 */
1154
1155static void qfec_phy_monitor(unsigned long data)
1156{
1157 struct net_device *dev = (struct net_device *) data;
1158 struct qfec_priv *priv = netdev_priv(dev);
1159 unsigned int spd = 0;
1160 unsigned int dplx = 1;
1161
1162 mod_timer(&priv->phy_tmr, jiffies + HZ);
1163
1164 if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) {
1165 qfec_get_an(dev, &spd, &dplx);
1166 qfec_speed_cfg(dev, spd, dplx);
1167 QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
1168 __func__, spd, dplx);
1169
1170 netif_carrier_on(dev);
1171 }
1172
1173 else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) {
1174 QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
1175 netif_carrier_off(dev);
1176 }
1177}
1178
1179/*
1180 * dealloc buffer descriptor memory
1181 */
1182
1183static void qfec_mem_dealloc(struct net_device *dev)
1184{
1185 struct qfec_priv *priv = netdev_priv(dev);
1186
1187 dma_free_coherent(&dev->dev,
1188 priv->bd_size, priv->bd_base, priv->tbd_dma);
1189 priv->bd_base = 0;
1190}
1191
1192/*
1193 * allocate shared device memory for TX/RX buf-desc (and buffers)
1194 */
1195
1196static int qfec_mem_alloc(struct net_device *dev)
1197{
1198 struct qfec_priv *priv = netdev_priv(dev);
1199
1200 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1201
1202 priv->bd_size =
1203 (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
1204
1205 priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
1206 if (!priv->p_tbd) {
1207 QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
1208 return -ENOMEM;
1209 }
1210
1211 priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
1212 if (!priv->p_rbd) {
1213 QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
1214 return -ENOMEM;
1215 }
1216
1217 /* alloc mem for buf-desc, if not already alloc'd */
1218 if (!priv->bd_base) {
1219 priv->bd_base = dma_alloc_coherent(&dev->dev,
1220 priv->bd_size, &priv->tbd_dma,
1221 GFP_KERNEL | __GFP_DMA);
1222 }
1223
1224 if (!priv->bd_base) {
1225 QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
1226 return -ENOMEM;
1227 }
1228
1229 priv->rbd_dma = priv->tbd_dma
1230 + (priv->n_tbd * sizeof(struct qfec_buf_desc));
1231
1232 QFEC_LOG(QFEC_LOG_DBG,
1233 " %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
1234 __func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
1235
1236 return 0;
1237}
1238
1239/*
1240 * display buffer descriptors
1241 */
1242
/* format one BD (HW words plus driver bookkeeping) into buf */
static int qfec_bd_fmt(char *buf, int size, struct buf_desc *bd)
{
	return snprintf(buf, size,
		"%8p: %08x %08x %8p %8p %8p %8p %8p %x",
		bd,
		qfec_bd_status_get(bd),
		qfec_bd_ctl_get(bd),
		qfec_bd_pbuf_get(bd),
		qfec_bd_next_get(bd),
		qfec_bd_skbuf_get(bd),
		qfec_bd_virt_get(bd),
		qfec_bd_phys_get(bd),
		qfec_bd_last_bd(bd));
}
1253
1254static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
1255 struct ring *p_ring, char *label)
1256{
1257 int l = 0;
1258 int n;
1259
1260 QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
1261
1262 l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
1263 if (!p_bd)
1264 return l;
1265
1266 n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
1267
1268 for (n = 0; n < n_bd; n++, p_bd++) {
1269 l += qfec_bd_fmt(&buf[l], count - l, p_bd);
1270 l += snprintf(&buf[l], count - l, "%s%s\n",
1271 (qfec_ring_head(p_ring) == n ? " < h" : ""),
1272 (qfec_ring_tail(p_ring) == n ? " < t" : ""));
1273 }
1274
1275 return l;
1276}
1277
1278/*
1279 * display TX BDs
1280 */
1281static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
1282 char *buf)
1283{
1284 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1285 int count = PAGE_SIZE;
1286
1287 return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
1288 &priv->ring_tbd, "TX");
1289}
1290
1291/*
1292 * display RX BDs
1293 */
1294static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
1295 char *buf)
1296{
1297 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1298 int count = PAGE_SIZE;
1299
1300 return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
1301 &priv->ring_rbd, "RX");
1302}
1303
1304/*
1305 * read timestamp from buffer descriptor
1306 * the pbuf and next fields of the buffer descriptors are overwritten
1307 * with the timestamp high and low register values. The high register
1308 * counts seconds, but the sub-second increment register is programmed
1309 * with the appropriate value to increment the timestamp low register
1310 * such that it overflows at 0x8000 0000. The low register value
1311 * (next) must be converted to units of nano secs, * 10^9 / 2^31.
1312 */
static void qfec_read_timestamp(struct buf_desc *p_bd,
	struct skb_shared_hwtstamps *ts)
{
	/* NOTE(review): the block comment above says pbuf/next were
	 * overwritten with the HIGH (seconds) and LOW (sub-second)
	 * register values respectively, yet sec is read from next and
	 * ns from pbuf -- confirm against the descriptor write-back
	 * format before relying on either mapping */
	unsigned long sec = (unsigned long)qfec_bd_next_get(p_bd);
	long long ns = (unsigned long)qfec_bd_pbuf_get(p_bd);

#define BILLION 1000000000
#define LOW_REG_BITS 31
	/* low register rolls over at 2^31, so: ns = low * 10^9 / 2^31 */
	ns *= BILLION;
	ns >>= LOW_REG_BITS;

	/* no system-time correlation is computed; both stamps get the
	 * same hardware-derived value */
	ts->hwtstamp = ktime_set(sec, ns);
	ts->syststamp = ktime_set(sec, ns);
}
1327
1328/*
 * free transmitted skbufs from buffer-descriptors not owned by HW
1330 */
static int qfec_tx_replenish(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	/* start at the ring tail: the oldest in-flight descriptor */
	struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	struct sk_buff *skb;
	unsigned long flags;

	CNTR_INC(priv, tx_replenish);

	/* serialize against qfec_xmit(), which fills the same ring */
	spin_lock_irqsave(&priv->xmit_lock, flags);

	while (!qfec_ring_empty(p_ring)) {
		/* stop at the first descriptor still owned by the DMA */
		if (qfec_bd_own(p_bd))
			break;		/* done for now */

		skb = qfec_bd_skbuf_get(p_bd);
		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, tx_skb_null);
			break;
		}

		/* ack the TX status bits by writing them back to
		 * STATUS_REG (same clearing scheme as qfec_int()) */
		qfec_reg_write(priv, STATUS_REG,
			STATUS_REG_TU | STATUS_REG_TI);

		/* retrieve timestamp if requested */
		if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) {
			CNTR_INC(priv, ts_tx_rtn);
			qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			skb_tstamp_tx(skb, skb_hwtstamps(skb));
		}

		/* update statistics before freeing skb */
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;

		/* release the streaming mapping created in qfec_xmit() */
		dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
				skb->len, DMA_TO_DEVICE);

		dev_kfree_skb_any(skb);
		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_tail_adv(p_ring);
		p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
	}

	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	/* descriptors were reclaimed; let the stack queue frames again */
	qfec_queue_start(dev);

	return 0;
}
1384
1385/*
1386 * clear ownership bits of all TX buf-desc and release the sk-bufs
1387 */
1388static void qfec_tx_timeout(struct net_device *dev)
1389{
1390 struct qfec_priv *priv = netdev_priv(dev);
1391 struct buf_desc *bd = priv->p_tbd;
1392 int n;
1393
1394 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1395 CNTR_INC(priv, tx_timeout);
1396
1397 for (n = 0; n < priv->n_tbd; n++, bd++)
1398 qfec_bd_own_clr(bd);
1399
1400 qfec_tx_replenish(dev);
1401}
1402
1403/*
1404 * rx() - process a received frame
1405 */
static void qfec_rx_int(struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_rbd;
	/* resume from the descriptor after the last one processed */
	struct buf_desc *p_bd = priv->p_latest_rbd;
	uint32_t desc_status;
	uint32_t mis_fr_reg;

	desc_status = qfec_bd_status_get(p_bd);
	mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);

	CNTR_INC(priv, rx_int);

	/* check that valid interrupt occurred */
	if (unlikely(desc_status & BUF_OWN)) {
		char s[100];

		qfec_bd_fmt(s, sizeof(s), p_bd);
		QFEC_LOG_ERR("%s: owned by DMA, %08x, %s\n", __func__,
			qfec_reg_read(priv, CUR_HOST_RX_DES_REG), s);
		CNTR_INC(priv, rx_owned);
		return;
	}

	/* accumulate missed-frame count (reg reset when read) */
	priv->stats.rx_missed_errors += mis_fr_reg
		& MIS_FR_REG_MISS_CNT;

	/* process all unowned frames */
	while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) {
		struct sk_buff *skb;
		struct buf_desc *p_bd_next;

		skb = qfec_bd_skbuf_get(p_bd);

		if (unlikely(skb == NULL)) {
			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
			CNTR_INC(priv, rx_skb_null);
			break;
		}

		/* cache coherency before skb->data is accessed */
		dma_unmap_single(&dev->dev,
				(dma_addr_t) qfec_bd_phys_get(p_bd),
				ETH_BUF_SIZE, DMA_FROM_DEVICE);
		prefetch(skb->data);

		if (unlikely(desc_status & BUF_RX_ES)) {
			/* error summary set: drop the frame */
			priv->stats.rx_dropped++;
			CNTR_INC(priv, rx_dropped);
			dev_kfree_skb(skb);
		} else {
			/* ack the receive interrupt (write-back clearing,
			 * as in qfec_int()) */
			qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);

			/* frame length comes from the descriptor status;
			 * NOTE(review): skb->len is assigned directly
			 * rather than via skb_put() -- presumably relies
			 * on the full-size buffer set up by
			 * qfec_rbd_init(); confirm tail is never used */
			skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);

			if (priv->state & timestamping) {
				CNTR_INC(priv, ts_rec);
				qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
			}

			/* update statistics before freeing skb */
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += skb->len;

			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			if (NET_RX_DROP == netif_rx(skb)) {
				priv->stats.rx_dropped++;
				CNTR_INC(priv, rx_dropped);
			}
			CNTR_INC(priv, netif_rx_cntr);
		}

		/* wrap to the start of the ring at the last descriptor */
		if (p_bd != priv->p_ending_rbd)
			p_bd_next = p_bd + 1;
		else
			p_bd_next = priv->p_rbd;
		desc_status = qfec_bd_status_get(p_bd_next);

		qfec_bd_skbuf_set(p_bd, NULL);

		qfec_ring_head_adv(p_ring);
		p_bd = p_bd_next;
	}

	priv->p_latest_rbd = p_bd;

	/* replenish bufs: attach fresh sk_buffs and hand the
	 * descriptors back to the DMA */
	while (!qfec_ring_empty(p_ring)) {
		if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
			break;
		qfec_ring_tail_adv(p_ring);
	}
}
1503
1504/*
1505 * isr() - interrupt service routine
1506 * determine cause of interrupt and invoke/schedule appropriate
1507 * processing or error handling
1508 */
/* bump the named error counter when the interrupt bit is set;
 * wrapped in do-while(0) so the macro is safe in if/else chains
 * (the bare if of the original was a dangling-else hazard) */
#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
	do { \
		if ((status) & (interrupt)) \
			CNTR_INC(priv, cntr); \
	} while (0)
1512
static irqreturn_t qfec_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t status = qfec_reg_read(priv, STATUS_REG);
	/* bits to write back at the end to acknowledge the interrupt */
	uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);

	/* abnormal interrupt: count each error cause individually */
	if (status & STATUS_REG_AIS) {
		QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
			__func__, status);

		ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail);
		ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);

		ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
		ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
		ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);

		ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
		ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
		ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);

		int_bits |= STATUS_REG_AIS_BITS;
		CNTR_INC(priv, abnorm_int);
	}

	if (status & STATUS_REG_NIS)
		CNTR_INC(priv, norm_int);

	/* receive interrupt */
	if (status & STATUS_REG_RI) {
		CNTR_INC(priv, rx_isr);
		qfec_rx_int(dev);
	}

	/* transmit interrupt */
	if (status & STATUS_REG_TI) {
		CNTR_INC(priv, tx_isr);
		qfec_tx_replenish(dev);
	}

	/* gmac interrupt */
	if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) {
		CNTR_INC(priv, gmac_isr);
		int_bits |= STATUS_REG_GMI;
	}

	/* clear interrupts by writing the handled bits back */
	qfec_reg_write(priv, STATUS_REG, int_bits);
	CNTR_INC(priv, isr);

	return IRQ_HANDLED;
}
1569
1570/*
1571 * open () - register system resources (IRQ, DMA, ...)
1572 * turn on HW, perform device setup.
1573 */
1574static int qfec_open(struct net_device *dev)
1575{
1576 struct qfec_priv *priv = netdev_priv(dev);
1577 struct buf_desc *p_bd;
1578 struct ring *p_ring;
1579 struct qfec_buf_desc *p_desc;
1580 int n;
1581 int res = 0;
1582
1583 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1584
1585 if (!dev) {
1586 res = -EINVAL;
1587 goto err;
1588 }
1589
1590 /* allocate TX/RX buffer-descriptors and buffers */
1591
1592 res = qfec_mem_alloc(dev);
1593 if (res)
1594 goto err;
1595
1596 /* initialize TX */
1597 p_desc = priv->bd_base;
1598
1599 for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
1600 p_bd->p_desc = p_desc++;
1601
1602 if (n == (priv->n_tbd - 1))
1603 qfec_bd_last_bd_set(p_bd);
1604
1605 qfec_bd_own_clr(p_bd); /* clear ownership */
1606 }
1607
1608 qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);
1609
1610 priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
1611 if (priv->tx_ic_mod == 0)
1612 priv->tx_ic_mod = 1;
1613
1614 /* initialize RX buffer descriptors and allocate sk_bufs */
1615 p_ring = &priv->ring_rbd;
1616 qfec_ring_init(p_ring, priv->n_rbd, 0);
1617 qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);
1618
1619 for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
1620 p_bd->p_desc = p_desc++;
1621
1622 if (qfec_rbd_init(dev, p_bd))
1623 break;
1624 qfec_ring_tail_adv(p_ring);
1625 }
1626
1627 priv->p_latest_rbd = priv->p_rbd;
1628 priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;
1629
1630 /* config ptp clock */
1631 qfec_ptp_cfg(priv);
1632
1633 /* configure PHY - must be set before reset/hw_init */
1634 qfec_intf_sel(priv, intfc_mii);
1635
1636 /* initialize controller after BDs allocated */
1637 res = qfec_hw_init(priv);
1638 if (res)
1639 goto err1;
1640
1641 /* get/set (primary) MAC address */
1642 qfec_set_adr_regs(priv, dev->dev_addr);
1643
1644 /* start phy monitor */
1645 QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
1646 netif_carrier_off(priv->net_dev);
1647 setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
1648 mod_timer(&priv->phy_tmr, jiffies + HZ);
1649
1650 /* initialize interrupts */
1651 QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
1652 res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
1653 if (res)
1654 goto err1;
1655
1656 /* enable controller */
1657 qfec_hw_enable(priv);
1658 netif_start_queue(dev);
1659
1660 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
1661 mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));
1662
1663 QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
1664 return 0;
1665
1666err1:
1667 qfec_mem_dealloc(dev);
1668err:
1669 QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
1670 return res;
1671}
1672
1673/*
1674 * stop() - "reverse operations performed at open time"
1675 */
1676static int qfec_stop(struct net_device *dev)
1677{
1678 struct qfec_priv *priv = netdev_priv(dev);
1679 struct buf_desc *p_bd;
1680 struct sk_buff *skb;
1681 int n;
1682
1683 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1684
1685 del_timer_sync(&priv->phy_tmr);
1686
1687 qfec_hw_disable(priv);
1688 qfec_queue_stop(dev);
1689 free_irq(dev->irq, dev);
1690
1691 /* free all pending sk_bufs */
1692 for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
1693 skb = qfec_bd_skbuf_get(p_bd);
1694 if (skb)
1695 dev_kfree_skb(skb);
1696 }
1697
1698 for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
1699 skb = qfec_bd_skbuf_get(p_bd);
1700 if (skb)
1701 dev_kfree_skb(skb);
1702 }
1703
1704 qfec_mem_dealloc(dev);
1705
1706 QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
1707
1708 return 0;
1709}
1710
/* .ndo_set_config stub: no remappable I/O or IRQ settings; accept and
 * ignore the request */
static int qfec_set_config(struct net_device *dev, struct ifmap *map)
{
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
	return 0;
}
1716
1717/*
1718 * pass data from skbuf to buf-desc
1719 */
static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct ring *p_ring = &priv->ring_tbd;
	struct buf_desc *p_bd;
	uint32_t ctrl = 0;
	int ret = NETDEV_TX_OK;
	unsigned long flags;

	CNTR_INC(priv, xmit);

	/* serialize ring access against qfec_tx_replenish() */
	spin_lock_irqsave(&priv->xmit_lock, flags);

	/* stop queuing if no resources available */
	if (qfec_ring_room(p_ring) == 0) {
		qfec_queue_stop(dev);
		CNTR_INC(priv, tx_no_resource);

		ret = NETDEV_TX_BUSY;
		goto done;
	}

	/* locate and save *sk_buff */
	p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
	qfec_bd_skbuf_set(p_bd, skb);

	/* set DMA ptr to sk_buff data and write cache to memory */
	qfec_bd_pbuf_set(p_bd, (void *)
	dma_map_single(&dev->dev,
		(void *)skb->data, skb->len, DMA_TO_DEVICE));

	/* control word starts with the frame length; only every
	 * tx_ic_mod-th descriptor asks for a completion interrupt */
	ctrl  = skb->len;
	if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
		ctrl |= BUF_TX_IC;		/* interrupt on complete */

	/* check if timestamping enabled and requested */
	if (priv->state & timestamping)  {
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
			CNTR_INC(priv, ts_tx_en);
			ctrl |= BUF_TX_IC;	/* interrupt on complete */
			ctrl |= BUF_TX_TTSE;	/* enable timestamp */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
	}

	/* NOTE(review): BUF_RX_RER is an RX-named flag used on a TX
	 * descriptor -- presumably the end-of-ring bit shares the same
	 * value for both directions; confirm against qfec.h */
	if (qfec_bd_last_bd(p_bd))
		ctrl |= BUF_RX_RER;

	/* no gather, no multi buf frames */
	ctrl |= BUF_TX_FS | BUF_TX_LS;		/* 1st and last segment */

	/* write the control word before handing ownership to the DMA */
	qfec_bd_ctl_wr(p_bd, ctrl);
	qfec_bd_status_set(p_bd, BUF_OWN);

	qfec_ring_head_adv(p_ring);
	qfec_reg_write(priv, TX_POLL_DEM_REG, 1);      /* poll */

done:
	spin_unlock_irqrestore(&priv->xmit_lock, flags);

	return ret;
}
1782
1783static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1784{
1785 struct qfec_priv *priv = netdev_priv(dev);
1786 struct hwtstamp_config *cfg = (struct hwtstamp_config *) ifr;
1787
1788 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1789
1790 if (cmd == SIOCSHWTSTAMP) {
1791 CNTR_INC(priv, ts_ioctl);
1792 QFEC_LOG(QFEC_LOG_DBG,
1793 "%s: SIOCSHWTSTAMP - %x flags %x tx %x rx\n",
1794 __func__, cfg->flags, cfg->tx_type, cfg->rx_filter);
1795
1796 cfg->flags = 0;
1797 cfg->tx_type = HWTSTAMP_TX_ON;
1798 cfg->rx_filter = HWTSTAMP_FILTER_ALL;
1799
1800 priv->state |= timestamping;
1801 qfec_reg_write(priv, TS_CTL_REG,
1802 qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);
1803
1804 return 0;
1805 }
1806
1807 return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
1808}
1809
1810static struct net_device_stats *qfec_get_stats(struct net_device *dev)
1811{
1812 struct qfec_priv *priv = netdev_priv(dev);
1813
1814 QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
1815
1816 return &priv->stats;
1817}
1818
1819/*
1820 * accept new mac address
1821 */
1822static int qfec_set_mac_address(struct net_device *dev, void *p)
1823{
1824 struct qfec_priv *priv = netdev_priv(dev);
1825 struct sockaddr *addr = p;
1826
1827 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1828
1829 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1830
1831 qfec_set_adr_regs(priv, dev->dev_addr);
1832
1833 return 0;
1834}
1835
1836/*
1837 * read discontinuous MAC address from corrected fuse memory region
1838 */
1839
1840static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
1841{
1842 static int offset[] = { 0, 1, 2, 3, 4, 8 };
1843 int n;
1844
1845 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1846
1847 for (n = 0; n < nBytes; n++)
1848 buf[n] = ioread8(mac_base + offset[n]);
1849
1850 /* check that MAC programmed */
1851 if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) {
1852 QFEC_LOG_ERR("%s: null MAC address\n", __func__);
1853 return -ENODATA;
1854 }
1855
1856 return 0;
1857}
1858
1859/*
1860 * static definition of driver functions
1861 */
static const struct net_device_ops qfec_netdev_ops = {
	.ndo_open = qfec_open,
	.ndo_stop = qfec_stop,
	.ndo_start_xmit = qfec_xmit,

	.ndo_do_ioctl = qfec_do_ioctl,
	.ndo_tx_timeout = qfec_tx_timeout,
	.ndo_set_mac_address = qfec_set_mac_address,

	/* generic ethernet helpers for MTU bounds and MAC validation */
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,

	.ndo_get_stats = qfec_get_stats,
	.ndo_set_config = qfec_set_config,
};
1877
1878/*
1879 * ethtool functions
1880 */
1881
1882static int qfec_nway_reset(struct net_device *dev)
1883{
1884 struct qfec_priv *priv = netdev_priv(dev);
1885 return mii_nway_restart(&priv->mii);
1886}
1887
1888/*
1889 * speed, duplex, auto-neg settings
1890 */
1891static void qfec_ethtool_getpauseparam(struct net_device *dev,
1892 struct ethtool_pauseparam *pp)
1893{
1894 struct qfec_priv *priv = netdev_priv(dev);
1895 u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
1896 u32 advert;
1897
1898 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1899
1900 /* report current settings */
1901 pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
1902 pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
1903
1904 /* report if pause is being advertised */
1905 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
1906 pp->autoneg =
1907 (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
1908}
1909
static int qfec_ethtool_setpauseparam(struct net_device *dev,
			struct ethtool_pauseparam *pp)
{
	struct qfec_priv *priv = netdev_priv(dev);
	u32 advert;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
		pp->autoneg, pp->rx_pause, pp->tx_pause);

	/* clear the current pause advertisement bits, then rebuild */
	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
	advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* If pause autonegotiation is enabled, but both rx and tx are not
	 * because neither was specified in the ethtool cmd,
	 * enable both symmetrical and asymmetrical pause.
	 * otherwise, only enable the pause mode indicated by rx/tx.
	 *
	 * NOTE(review): the code below does not match that description:
	 * rx_pause sets BOTH bits, tx-only sets ASYM, and the
	 * neither-specified case sets only CAP -- confirm which of the
	 * two (comment or code) reflects the intended policy.
	 */
	if (pp->autoneg) {
		if (pp->rx_pause)
			advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
		else if (pp->tx_pause)
			advert |= ADVERTISE_PAUSE_ASYM;
		else
			advert |= ADVERTISE_PAUSE_CAP;
	}

	qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);

	return 0;
}
1940
1941/*
1942 * ethtool ring parameter (-g/G) support
1943 */
1944
1945/*
 * setringparam - change the tx/rx ring lengths
1947 */
1948#define MIN_RING_SIZE 3
1949#define MAX_RING_SIZE 1000
1950static int qfec_ethtool_setringparam(struct net_device *dev,
1951 struct ethtool_ringparam *ring)
1952{
1953 struct qfec_priv *priv = netdev_priv(dev);
1954 u32 timeout = 20;
1955
1956 /* notify stack the link is down */
1957 netif_carrier_off(dev);
1958
1959 /* allow tx to complete & free skbufs on the tx ring */
1960 do {
1961 usleep_range(10000, 100000);
1962 qfec_tx_replenish(dev);
1963
1964 if (timeout-- == 0) {
1965 QFEC_LOG_ERR("%s: timeout\n", __func__);
1966 return -ETIME;
1967 }
1968 } while (!qfec_ring_empty(&priv->ring_tbd));
1969
1970
1971 qfec_stop(dev);
1972
1973 /* set tx ring size */
1974 if (ring->tx_pending < MIN_RING_SIZE)
1975 ring->tx_pending = MIN_RING_SIZE;
1976 else if (ring->tx_pending > MAX_RING_SIZE)
1977 ring->tx_pending = MAX_RING_SIZE;
1978 priv->n_tbd = ring->tx_pending;
1979
1980 /* set rx ring size */
1981 if (ring->rx_pending < MIN_RING_SIZE)
1982 ring->rx_pending = MIN_RING_SIZE;
1983 else if (ring->rx_pending > MAX_RING_SIZE)
1984 ring->rx_pending = MAX_RING_SIZE;
1985 priv->n_rbd = ring->rx_pending;
1986
1987
1988 qfec_open(dev);
1989
1990 return 0;
1991}
1992
1993/*
 * getringparam - returns local values
1995 */
1996static void qfec_ethtool_getringparam(struct net_device *dev,
1997 struct ethtool_ringparam *ring)
1998{
1999 struct qfec_priv *priv = netdev_priv(dev);
2000
2001 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2002
2003 ring->rx_max_pending = MAX_RING_SIZE;
2004 ring->rx_mini_max_pending = 0;
2005 ring->rx_jumbo_max_pending = 0;
2006 ring->tx_max_pending = MAX_RING_SIZE;
2007
2008 ring->rx_pending = priv->n_rbd;
2009 ring->rx_mini_pending = 0;
2010 ring->rx_jumbo_pending = 0;
2011 ring->tx_pending = priv->n_tbd;
2012}
2013
2014/*
2015 * speed, duplex, auto-neg settings
2016 */
2017static int
2018qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2019{
2020 struct qfec_priv *priv = netdev_priv(dev);
2021
2022 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2023
2024 cmd->maxrxpkt = priv->n_rbd;
2025 cmd->maxtxpkt = priv->n_tbd;
2026
2027 return mii_ethtool_gset(&priv->mii, cmd);
2028}
2029
2030static int
2031qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2032{
2033 struct qfec_priv *priv = netdev_priv(dev);
2034
2035 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2036
2037 return mii_ethtool_sset(&priv->mii, cmd);
2038}
2039
2040/*
2041 * msg/debug level
2042 */
/* ethtool: report the current debug-flag bitmask (QFEC_LOG_* bits) */
static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
{
	return qfec_debug;
}
2047
static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	/* XOR semantics: each set bit in level TOGGLES the matching
	 * debug flag rather than replacing the whole mask, which is
	 * unusual for a msglevel handler -- callers should be aware */
	qfec_debug ^= level;	/* toggle on/off */
}
2052
2053/*
2054 * register dump
2055 */
/*
 * layout of the ethtool register-dump buffer (see qfec_ethtool_getregs):
 * three 32-bit windows (DMA, MAC, timestamp) read from the controller,
 * followed by the 16-bit MDIO registers.  *_DMP_OFFSET is the byte
 * position inside the dump buffer, *_REG_OFFSET the controller register
 * offset, *_REG_LEN the number of registers copied.
 */
#define DMA_DMP_OFFSET  0x0000
#define DMA_REG_OFFSET  0x1000
#define DMA_REG_LEN     23

#define MAC_DMP_OFFSET  0x0080
#define MAC_REG_OFFSET  0x0000
#define MAC_REG_LEN     55

#define TS_DMP_OFFSET   0x0180
#define TS_REG_OFFSET   0x0700
#define TS_REG_LEN      15

#define MDIO_DMP_OFFSET 0x0200
#define MDIO_REG_LEN    16

/* total dump size in bytes: MDIO section start plus its 16-bit regs */
#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
2072
/* size in bytes of the buffer filled by qfec_ethtool_getregs() */
static int qfec_ethtool_getregs_len(struct net_device *dev)
{
	return REG_SIZE;
}
2077
static void
qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
	void *buf)
{
	struct qfec_priv *priv = netdev_priv(dev);
	u32 *data = buf;
	u16 *data16;
	unsigned int i;
	unsigned int j;
	unsigned int n;

	/* zero-fill so gaps between the windows read as zero */
	memset(buf, 0, REG_SIZE);

	/* DMA window: DMA_REG_LEN regs starting at controller 0x1000,
	 * stored big-endian (htonl) at DMA_DMP_OFFSET */
	j = DMA_DMP_OFFSET / sizeof(u32);
	for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* MAC window: MAC_REG_LEN regs from controller offset 0 */
	j = MAC_DMP_OFFSET / sizeof(u32);
	for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* timestamp window: TS_REG_LEN regs from controller 0x700 */
	j = TS_DMP_OFFSET / sizeof(u32);
	for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
		data[j++] = htonl(qfec_reg_read(priv, i));

	/* MDIO window: 16-bit registers read from PHY address 0;
	 * NOTE(review): other paths use priv->phy_id -- confirm that
	 * address 0 is intentional here */
	data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
	for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
		data16[n++] = htons(qfec_mdio_read(dev, 0, i));

	regs->len = REG_SIZE;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
}
2111
2112/*
2113 * statistics
2114 * return counts of various ethernet activity.
2115 * many of these are same as in struct net_device_stats
2116 *
2117 * missed-frames indicates the number of attempts made by the ethernet
2118 * controller to write to a buffer-descriptor when the BD ownership
2119 * bit was not set. The rxfifooverflow counter (0x1D4) is not
2120 * available. The Missed Frame and Buffer Overflow Counter register
2121 * (0x1020) is used, but has only 16-bits and is reset when read.
2122 * It is read and updates the value in priv->stats.rx_missed_errors
2123 * in qfec_rx_int().
2124 */
static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
	/* must stay 1:1, in order, with qfec_stats_regs[] below (plus
	 * one trailing software entry for RX missed frames).
	 * NOTE(review): "TX Pause Frames" and "RX Pause Frames" each
	 * appear twice, matching the duplicated indices 92 and 116 in
	 * qfec_stats_regs[] -- presumably intentional, verify */
	"TX good/bad Bytes ",
	"TX Bytes ",
	"TX good/bad Frames ",
	"TX Bcast Frames ",
	"TX Mcast Frames ",
	"TX Unicast Frames ",
	"TX Pause Frames ",
	"TX Vlan Frames ",
	"TX Frames 64 ",
	"TX Frames 65-127 ",
	"TX Frames 128-255 ",
	"TX Frames 256-511 ",
	"TX Frames 512-1023 ",
	"TX Frames 1024+ ",
	"TX Pause Frames ",
	"TX Collisions ",
	"TX Late Collisions ",
	"TX Excessive Collisions ",

	"RX good/bad Bytes ",
	"RX Bytes ",
	"RX good/bad Frames ",
	"RX Bcast Frames ",
	"RX Mcast Frames ",
	"RX Unicast Frames ",
	"RX Pause Frames ",
	"RX Vlan Frames ",
	"RX Frames 64 ",
	"RX Frames 65-127 ",
	"RX Frames 128-255 ",
	"RX Frames 256-511 ",
	"RX Frames 512-1023 ",
	"RX Frames 1024+ ",
	"RX Pause Frames ",
	"RX Crc error Frames ",
	"RX Length error Frames ",
	"RX Alignment error Frames ",
	"RX Runt Frames ",
	"RX Oversize Frames ",
	"RX Missed Frames ",

};
2168
/* MAC management-counter register INDICES (byte offset = index * 4),
 * 1:1 with qfec_stats_strings[] above; 92 and 116 are listed twice,
 * matching the duplicated pause-frame labels */
static u32 qfec_stats_regs[] = {

	 69,  89,  70,  71,  72,  90,  92,  93,
	 73,  74,  75,  76,  77,  78,  92,  84,
	 86,  87,

	 97,  98,  96,  99, 100, 113, 116, 118,
	107, 108, 109, 110, 111, 112, 116, 101,
	114, 102, 103, 106
};
2179
2180static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
2181 char *buf)
2182{
2183 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
2184 int count = PAGE_SIZE;
2185 int l = 0;
2186 int n;
2187
2188 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
2189
2190 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) {
2191 l += snprintf(&buf[l], count - l, " %12u %s\n",
2192 qfec_reg_read(priv,
2193 qfec_stats_regs[n] * sizeof(uint32_t)),
2194 qfec_stats_strings[n]);
2195 }
2196
2197 return l;
2198}
2199
2200static int qfec_get_sset_count(struct net_device *dev, int sset)
2201{
2202 switch (sset) {
2203 case ETH_SS_STATS:
2204 return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */
2205
2206 default:
2207 return -EOPNOTSUPP;
2208 }
2209}
2210
2211static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
2212 u8 *buf)
2213{
2214 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__,
2215 sizeof(qfec_stats_strings));
2216
2217 memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
2218}
2219
2220static void qfec_ethtool_getstats(struct net_device *dev,
2221 struct ethtool_stats *stats, uint64_t *data)
2222{
2223 struct qfec_priv *priv = netdev_priv(dev);
2224 int j = 0;
2225 int n;
2226
2227 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
2228 data[j++] = qfec_reg_read(priv,
2229 qfec_stats_regs[n] * sizeof(uint32_t));
2230
2231 data[j++] = priv->stats.rx_missed_errors;
2232
2233 stats->n_stats = j;
2234}
2235
2236static void qfec_ethtool_getdrvinfo(struct net_device *dev,
2237 struct ethtool_drvinfo *info)
2238{
2239 strlcpy(info->driver, QFEC_NAME, sizeof(info->driver));
2240 strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
2241 strlcpy(info->bus_info, dev_name(dev->dev.parent),
2242 sizeof(info->bus_info));
2243
2244 info->eedump_len = 0;
2245 info->regdump_len = qfec_ethtool_getregs_len(dev);
2246}
2247
2248/*
2249 * ethtool ops table
2250 */
static const struct ethtool_ops qfec_ethtool_ops = {
	.nway_reset = qfec_nway_reset,

	/* link settings and identification */
	.get_settings = qfec_ethtool_getsettings,
	.set_settings = qfec_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = qfec_ethtool_getdrvinfo,
	.get_msglevel = qfec_ethtool_getmsglevel,
	.set_msglevel = qfec_ethtool_setmsglevel,
	.get_regs_len = qfec_ethtool_getregs_len,
	.get_regs = qfec_ethtool_getregs,

	/* ring sizing (-g/-G) */
	.get_ringparam = qfec_ethtool_getringparam,
	.set_ringparam = qfec_ethtool_setringparam,

	.get_pauseparam = qfec_ethtool_getpauseparam,
	.set_pauseparam = qfec_ethtool_setpauseparam,

	/* statistics (-S) */
	.get_sset_count = qfec_get_sset_count,
	.get_strings = qfec_ethtool_getstrings,
	.get_ethtool_stats = qfec_ethtool_getstats,
};
2273
2274/*
2275 * create sysfs entries
2276 */
/* read-only sysfs attributes; qfec_config_show, qfec_clk_reg_show,
 * qfec_cntrs_show and qfec_reg_show are defined earlier in this file */
static DEVICE_ATTR(bd_tx, 0444, qfec_bd_tx_show, NULL);
static DEVICE_ATTR(bd_rx, 0444, qfec_bd_rx_show, NULL);
static DEVICE_ATTR(cfg, 0444, qfec_config_show, NULL);
static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
static DEVICE_ATTR(cntrs, 0444, qfec_cntrs_show, NULL);
static DEVICE_ATTR(stats, 0444, qfec_stats_show, NULL);
static DEVICE_ATTR(reg, 0444, qfec_reg_show, NULL);
2284
2285static void qfec_sysfs_create(struct net_device *dev)
2286{
2287 if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
2288 device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
2289 device_create_file(&(dev->dev), &dev_attr_cfg) ||
2290 device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
2291 device_create_file(&(dev->dev), &dev_attr_cntrs) ||
2292 device_create_file(&(dev->dev), &dev_attr_reg) ||
2293 device_create_file(&(dev->dev), &dev_attr_stats))
2294 pr_err("qfec_sysfs_create failed to create sysfs files\n");
2295}
2296
2297/*
2298 * map a specified resource
2299 */
2300static int qfec_map_resource(struct platform_device *plat, int resource,
2301 struct resource **priv_res,
2302 void **addr)
2303{
2304 struct resource *res;
2305
2306 QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
2307
2308 /* allocate region to access controller registers */
2309 *priv_res = res = platform_get_resource(plat, resource, 0);
2310 if (!res) {
2311 QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
2312 return -ENODEV;
2313 }
2314
2315 res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
2316 if (!res) {
2317 QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
2318 __func__, res->start, res->end - res->start);
2319 return -EBUSY;
2320 }
2321
2322 *addr = ioremap(res->start, res->end - res->start);
2323 if (!*addr)
2324 return -ENOMEM;
2325
2326 QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
2327 __func__, (void *)res->start, *addr);
2328
2329 return 0;
2330};
2331
2332/*
2333 * free allocated io regions
2334 */
/* unmap base (if mapped) and release the memory region reserved in
 * qfec_map_resource(); safe to call with res == NULL */
static void qfec_free_res(struct resource *res, void *base)
{

	if (res)  {
		if (base)
			iounmap((void __iomem *)base);

		/* NOTE(review): size matches the end - start span used
		 * by qfec_map_resource(); both are one byte short of
		 * resource_size() -- change them together if ever fixed */
		release_mem_region(res->start, res->end - res->start);
	}
};
2345
2346/*
 * probe function that obtains configuration info and allocates the net_device
2348 */
2349static int __devinit qfec_probe(struct platform_device *plat)
2350{
2351 struct net_device *dev;
2352 struct qfec_priv *priv;
2353 int ret = 0;
2354
2355 /* allocate device */
2356 dev = alloc_etherdev(sizeof(struct qfec_priv));
2357 if (!dev) {
2358 QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
2359 ret = -ENOMEM;
2360 goto err;
2361 }
2362
2363 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n", __func__, (int)dev);
2364
2365 qfec_dev = dev;
2366 SET_NETDEV_DEV(dev, &plat->dev);
2367
2368 dev->netdev_ops = &qfec_netdev_ops;
2369 dev->ethtool_ops = &qfec_ethtool_ops;
2370 dev->watchdog_timeo = 2 * HZ;
2371 dev->irq = platform_get_irq(plat, 0);
2372
2373 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2374
2375 /* initialize private data */
2376 priv = (struct qfec_priv *)netdev_priv(dev);
2377 memset((void *)priv, 0, sizeof(priv));
2378
2379 priv->net_dev = dev;
2380 platform_set_drvdata(plat, dev);
2381
2382 priv->n_tbd = TX_BD_NUM;
2383 priv->n_rbd = RX_BD_NUM;
2384
2385 /* initialize phy structure */
2386 priv->mii.phy_id_mask = 0x1F;
2387 priv->mii.reg_num_mask = 0x1F;
2388 priv->mii.dev = dev;
2389 priv->mii.mdio_read = qfec_mdio_read;
2390 priv->mii.mdio_write = qfec_mdio_write;
2391
2392 /* map register regions */
2393 ret = qfec_map_resource(
2394 plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
2395 if (ret) {
2396 QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
2397 goto err1;
2398 }
2399
2400 ret = qfec_map_resource(
2401 plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
2402 if (ret) {
2403 QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
2404 goto err2;
2405 }
2406
2407 ret = qfec_map_resource(
2408 plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
2409 if (ret) {
2410 QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
2411 goto err3;
2412 }
2413
2414 /* initialize MAC addr */
2415 ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
2416 MAC_ADDR_SIZE);
2417 if (ret)
2418 goto err4;
2419
2420 QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
2421 __func__,
2422 dev->dev_addr[0], dev->dev_addr[1],
2423 dev->dev_addr[2], dev->dev_addr[3],
2424 dev->dev_addr[4], dev->dev_addr[5]);
2425
2426 ret = register_netdev(dev);
2427 if (ret) {
2428 QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
2429 goto err4;
2430 }
2431
2432 spin_lock_init(&priv->mdio_lock);
2433 spin_lock_init(&priv->xmit_lock);
2434 qfec_sysfs_create(dev);
2435
2436 return 0;
2437
2438 /* error handling */
2439err4:
2440 qfec_free_res(priv->fuse_res, priv->fuse_base);
2441err3:
2442 qfec_free_res(priv->clk_res, priv->clk_base);
2443err2:
2444 qfec_free_res(priv->mac_res, priv->mac_base);
2445err1:
2446 free_netdev(dev);
2447err:
2448 QFEC_LOG_ERR("%s: err\n", __func__);
2449 return ret;
2450}
2451
2452/*
2453 * module remove
2454 */
2455static int __devexit qfec_remove(struct platform_device *plat)
2456{
2457 struct net_device *dev = platform_get_drvdata(plat);
2458 struct qfec_priv *priv = netdev_priv(dev);
2459
2460 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2461
2462 platform_set_drvdata(plat, NULL);
2463
2464 qfec_free_res(priv->fuse_res, priv->fuse_base);
2465 qfec_free_res(priv->clk_res, priv->clk_base);
2466 qfec_free_res(priv->mac_res, priv->mac_base);
2467
2468 unregister_netdev(dev);
2469 free_netdev(dev);
2470
2471 return 0;
2472}
2473
/*
 * module support
 * the FSM9xxx is not a mobile device does not support power management
 */

/* platform driver binding: matched against devices named QFEC_NAME ("qfec") */
static struct platform_driver qfec_driver = {
	.probe  = qfec_probe,
	.remove = __devexit_p(qfec_remove),
	.driver = {
		.name   = QFEC_NAME,
		.owner  = THIS_MODULE,
	},
};
2487
2488/*
2489 * module init
2490 */
2491static int __init qfec_init_module(void)
2492{
2493 int res;
2494
2495 QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
2496
2497 res = platform_driver_register(&qfec_driver);
2498
2499 QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
2500 __func__, res);
2501
2502 return res;
2503}
2504
2505/*
2506 * module exit
2507 */
2508static void __exit qfec_exit_module(void)
2509{
2510 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2511
2512 platform_driver_unregister(&qfec_driver);
2513}
2514
/* module metadata exposed via modinfo */
MODULE_DESCRIPTION("FSM Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
MODULE_VERSION("1.0");

/* register the module's entry and exit points with the kernel */
module_init(qfec_init_module);
module_exit(qfec_exit_module);