blob: 112e16afb57df5a45dc1f21cb3e8b1e087eaa18c [file] [log] [blame]
Steve Mucklef132c6c2012-06-06 18:30:57 -07001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/io.h>
14#include <linux/module.h>
15
16#include <linux/platform_device.h>
17
18#include <linux/types.h> /* size_t */
19#include <linux/interrupt.h> /* mark_bh */
20
21#include <linux/netdevice.h> /* struct device, and other headers */
22#include <linux/etherdevice.h> /* eth_type_trans */
23#include <linux/skbuff.h>
24
25#include <linux/proc_fs.h>
26#include <linux/timer.h>
27#include <linux/mii.h>
28
29#include <linux/ethtool.h>
30#include <linux/net_tstamp.h>
31#include <linux/phy.h>
32#include <linux/inet.h>
33
34#include "qfec.h"
35
36#define QFEC_NAME "qfec"
37#define QFEC_DRV_VER "Nov 29 2011"
38
39#define ETH_BUF_SIZE 0x600
40#define MAX_N_BD 50
41#define MAC_ADDR_SIZE 6
42
43#define RX_TX_BD_RATIO 8
44#define TX_BD_NUM 256
45#define RX_BD_NUM 256
46#define TX_BD_TI_RATIO 4
47#define MAX_MDIO_REG 32
48
49#define H_DPLX 0
50#define F_DPLX 1
51/*
52 * logging macros
53 */
54#define QFEC_LOG_PR 1
55#define QFEC_LOG_DBG 2
56#define QFEC_LOG_DBG2 4
57#define QFEC_LOG_MDIO_W 8
58#define QFEC_LOG_MDIO_R 16
59#define QFEC_MII_EXP_MASK (EXPANSION_LCWP | EXPANSION_ENABLENPAGE \
60 | EXPANSION_NPCAPABLE)
61
62static int qfec_debug = QFEC_LOG_PR;
63
64#ifdef QFEC_DEBUG
65# define QFEC_LOG(flag, ...) \
66 do { \
67 if (flag & qfec_debug) \
68 pr_info(__VA_ARGS__); \
69 } while (0)
70#else
71# define QFEC_LOG(flag, ...)
72#endif
73
74#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
75
76/*
77 * driver buffer-descriptor
78 * contains the 4 word HW descriptor plus an additional 4-words.
79 * (See the DSL bits in the BUS-Mode register).
80 */
81#define BD_FLAG_LAST_BD 1
82
/*
 * driver-private wrapper around one hardware descriptor; the hardware
 * sees only *p_desc, the remaining fields are host-side bookkeeping
 */
struct buf_desc {
	struct qfec_buf_desc *p_desc;	/* the 4-word HW descriptor */
	struct sk_buff *skb;		/* skb currently bound to this BD */
	void *buf_virt_addr;		/* CPU address of the data buffer */
	void *buf_phys_addr;		/* DMA/bus address of the data buffer */
	uint32_t last_bd_flag;		/* non-zero on the final BD of a ring */
};
90
91/*
92 *inline functions accessing non-struct qfec_buf_desc elements
93 */
94
95/* skb */
96static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
97{
98 return p_bd->skb;
99};
100
101static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
102{
103 p_bd->skb = p;
104};
105
106/* virtual addr */
107static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
108{
109 p_bd->buf_virt_addr = addr;
110};
111
112static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
113{
114 return p_bd->buf_virt_addr;
115};
116
117/* physical addr */
118static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
119{
120 p_bd->buf_phys_addr = addr;
121};
122
123static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
124{
125 return p_bd->buf_phys_addr;
126};
127
128/* last_bd_flag */
129static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
130{
131 return (p_bd->last_bd_flag != 0);
132};
133
134static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
135{
136 p_bd->last_bd_flag = BD_FLAG_LAST_BD;
137};
138
139/*
140 *inline functions accessing struct qfec_buf_desc elements
141 */
142
143/* ownership bit */
144static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
145{
146 return p_bd->p_desc->status & BUF_OWN;
147};
148
149static inline void qfec_bd_own_set(struct buf_desc *p_bd)
150{
151 p_bd->p_desc->status |= BUF_OWN ;
152};
153
154static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
155{
156 p_bd->p_desc->status &= ~(BUF_OWN);
157};
158
159static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
160{
161 return p_bd->p_desc->status;
162};
163
164static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
165{
166 p_bd->p_desc->status = status;
167};
168
169static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
170{
171 return BUF_RX_FL_GET((*p_bd->p_desc));
172};
173
174/* control register */
175static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
176{
177 p_bd->p_desc->ctl = 0;
178};
179
180static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
181{
182 return p_bd->p_desc->ctl;
183};
184
185static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
186{
187 p_bd->p_desc->ctl |= val;
188};
189
190static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
191{
192 p_bd->p_desc->ctl = val;
193};
194
195/* pbuf register */
196static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
197{
198 return p_bd->p_desc->p_buf;
199}
200
201static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
202{
203 p_bd->p_desc->p_buf = p;
204}
205
206/* next register */
207static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
208{
209 return p_bd->p_desc->next;
210};
211
212/*
213 * initialize an RX BD w/ a new buf
214 */
215static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
216{
217 struct sk_buff *skb;
218 void *p;
219 void *v;
220
221 /* allocate and record ptrs for sk buff */
222 skb = dev_alloc_skb(ETH_BUF_SIZE);
223 if (!skb)
224 goto err;
225
226 qfec_bd_skbuf_set(p_bd, skb);
227
228 v = skb_put(skb, ETH_BUF_SIZE);
229 qfec_bd_virt_set(p_bd, v);
230
231 p = (void *) dma_map_single(&dev->dev,
232 (void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
233 qfec_bd_pbuf_set(p_bd, p);
234 qfec_bd_phys_set(p_bd, p);
235
236 /* populate control register */
237 /* mark the last BD and set end-of-ring bit */
238 qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
239 (qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));
240
241 qfec_bd_status_set(p_bd, BUF_OWN);
242
243 if (!(qfec_debug & QFEC_LOG_DBG2))
244 return 0;
245
246 /* debug messages */
247 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);
248
249 QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);
250
251 QFEC_LOG(QFEC_LOG_DBG2,
252 "%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
253 __func__, (void *)p_bd,
254 (void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
255 (void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
256 (void *)p);
257
258 return 0;
259
260err:
261 return -ENOMEM;
262};
263
264/*
265 * ring structure used to maintain indices of buffer-descriptor (BD) usage
266 *
267 * The RX BDs are normally all pre-allocated with buffers available to be
268 * DMA'd into with received frames. The head indicates the first BD/buffer
269 * containing a received frame, and the tail indicates the oldest BD/buffer
270 * that needs to be restored for use. Head and tail are both initialized
271 * to zero, and n_free is initialized to zero, since all BD are initialized.
272 *
273 * The TX BDs are normally available for use, only being initialized as
274 * TX frames are requested for transmission. The head indicates the
275 * first available BD, and the tail indicate the oldest BD that has
276 * not been acknowledged as transmitted. Head and tail are both initialized
277 * to zero, and n_free is initialized to len, since all are available for use.
278 */
struct ring {
	int head;	/* next BD the producer will use */
	int tail;	/* oldest BD awaiting consumer processing */
	int n_free;	/* BDs currently available to the producer */
	int len;	/* total number of BDs in the ring */
};
285
286/* accessory in line functions for struct ring */
287static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
288{
289 p_ring->head = p_ring->tail = 0;
290 p_ring->len = size;
291 p_ring->n_free = free;
292}
293
294static inline int qfec_ring_full(struct ring *p_ring)
295{
296 return (p_ring->n_free == 0);
297};
298
299static inline int qfec_ring_empty(struct ring *p_ring)
300{
301 return (p_ring->n_free == p_ring->len);
302}
303
304static inline void qfec_ring_head_adv(struct ring *p_ring)
305{
306 if (++p_ring->head == p_ring->len)
307 p_ring->head = 0;
308 p_ring->n_free--;
309};
310
311static inline void qfec_ring_tail_adv(struct ring *p_ring)
312{
313 if (++p_ring->tail == p_ring->len)
314 p_ring->tail = 0;
315 p_ring->n_free++;
316};
317
318static inline int qfec_ring_head(struct ring *p_ring)
319{
320
321 return p_ring->head;
322};
323
324static inline int qfec_ring_tail(struct ring *p_ring)
325{
326 return p_ring->tail;
327};
328
329static inline int qfec_ring_room(struct ring *p_ring)
330{
331 return p_ring->n_free;
332};
333
334/*
335 * counters track normal and abnormal driver events and activity
336 */
/*
 * counter indices; order must match cntr_name[] exactly, and the
 * "half" marker matters: qfec_cntrs_show() prints entry n beside
 * entry n + (cntr_last + 1) / 2, pairing the two halves column-wise
 */
enum cntr {
	isr = 0,
	fatal_bus,

	early_tx,
	tx_no_resource,
	tx_proc_stopped,
	tx_jabber_tmout,

	xmit,
	tx_int,
	tx_isr,
	tx_owned,
	tx_underflow,

	tx_replenish,
	tx_skb_null,
	tx_timeout,
	tx_too_large,

	gmac_isr,

	/* half */
	norm_int,
	abnorm_int,

	early_rx,
	rx_buf_unavail,
	rx_proc_stopped,
	rx_watchdog,

	netif_rx_cntr,
	rx_int,
	rx_isr,
	rx_owned,
	rx_overflow,

	rx_dropped,
	rx_skb_null,
	queue_start,
	queue_stop,

	rx_paddr_nok,
	ts_ioctl,
	ts_tx_en,
	ts_tx_rtn,

	ts_rec,
	cntr_last,	/* count of real counters, not a counter itself */
};
387
/*
 * display names for enum cntr; MUST stay in the same order as the enum.
 * The trailing "" pads the array so the paired-column display in
 * qfec_cntrs_show() has a name for the final odd slot.
 */
static char *cntr_name[] = {
	"isr",
	"fatal_bus",

	"early_tx",
	"tx_no_resource",
	"tx_proc_stopped",
	"tx_jabber_tmout",

	"xmit",
	"tx_int",
	"tx_isr",
	"tx_owned",
	"tx_underflow",

	"tx_replenish",
	"tx_skb_null",
	"tx_timeout",
	"tx_too_large",

	"gmac_isr",

	/* half */
	"norm_int",
	"abnorm_int",

	"early_rx",
	"rx_buf_unavail",
	"rx_proc_stopped",
	"rx_watchdog",

	"netif_rx",
	"rx_int",
	"rx_isr",
	"rx_owned",
	"rx_overflow",

	"rx_dropped",
	"rx_skb_null",
	"queue_start",
	"queue_stop",

	"rx_paddr_nok",
	"ts_ioctl",
	"ts_tx_en",
	"ts_tx_rtn",

	"ts_rec",
	""
};
438
439/*
440 * private data
441 */
442
/* the single net_device instance registered by this driver */
static struct net_device *qfec_dev;

/* flag bits kept in qfec_priv.state */
enum qfec_state {
	timestamping = 0x04,	/* HW timestamping active */
};
448
/* per-device driver state, stored as netdev_priv() of net_dev */
struct qfec_priv {
	struct net_device *net_dev;
	struct net_device_stats stats; /* req statistics */

	struct device dev;

	spinlock_t xmit_lock;		/* serializes TX path */
	spinlock_t mdio_lock;		/* serializes MDIO accesses */

	unsigned int state; /* driver state */

	unsigned int bd_size; /* buf-desc alloc size */
	struct qfec_buf_desc *bd_base; /* * qfec-buf-desc */
	dma_addr_t tbd_dma; /* dma/phy-addr buf-desc */
	dma_addr_t rbd_dma; /* dma/phy-addr buf-desc */

	struct resource *mac_res;
	void *mac_base; /* mac (virt) base address */

	struct resource *clk_res;
	void *clk_base; /* clk (virt) base address */

	struct resource *fuse_res;
	void *fuse_base; /* mac addr fuses */

	unsigned int n_tbd; /* # of TX buf-desc */
	struct ring ring_tbd; /* TX ring */
	struct buf_desc *p_tbd;
	unsigned int tx_ic_mod; /* (%) val for setting IC */

	unsigned int n_rbd; /* # of RX buf-desc */
	struct ring ring_rbd; /* RX ring */
	struct buf_desc *p_rbd;

	/* RX replenish bookkeeping */
	struct buf_desc *p_latest_rbd;
	struct buf_desc *p_ending_rbd;

	unsigned long cntr[cntr_last]; /* activity counters */

	struct mii_if_info mii; /* used by mii lib */

	int mdio_clk; /* phy mdio clock rate */
	int phy_id; /* default PHY addr (0) */
	struct timer_list phy_tmr; /* monitor PHY state */
};
494
495/*
496 * cntrs display
497 */
498
499static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
500 char *buf)
501{
502 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
503 int h = (cntr_last + 1) / 2;
504 int l;
505 int n;
506 int count = PAGE_SIZE;
507
508 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
509
510 l = snprintf(&buf[0], count, "%s:\n", __func__);
511 for (n = 0; n < h; n++) {
512 l += snprintf(&buf[l], count - l,
513 " %12lu %-16s %12lu %s\n",
514 priv->cntr[n], cntr_name[n],
515 priv->cntr[n+h], cntr_name[n+h]);
516 }
517
518 return l;
519}
520
521# define CNTR_INC(priv, name) (priv->cntr[name]++)
522
523/*
524 * functions that manage state
525 */
526static inline void qfec_queue_start(struct net_device *dev)
527{
528 struct qfec_priv *priv = netdev_priv(dev);
529
530 if (netif_queue_stopped(dev)) {
531 netif_wake_queue(dev);
532 CNTR_INC(priv, queue_start);
533 }
534};
535
536static inline void qfec_queue_stop(struct net_device *dev)
537{
538 struct qfec_priv *priv = netdev_priv(dev);
539
540 netif_stop_queue(dev);
541 CNTR_INC(priv, queue_stop);
542};
543
544/*
545 * functions to access and initialize the MAC registers
546 */
547static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
548{
549 return ioread32((void *) (priv->mac_base + reg));
550}
551
552static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
553{
554 uint32_t addr = (uint32_t)priv->mac_base + reg;
555
556 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
557 iowrite32(val, (void *)addr);
558}
559
560/*
561 * speed/duplex/pause settings
562 */
563static int qfec_config_show(struct device *dev, struct device_attribute *attr,
564 char *buf)
565{
566 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
567 int cfg = qfec_reg_read(priv, MAC_CONFIG_REG);
568 int flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
569 int l = 0;
570 int count = PAGE_SIZE;
571
572 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
573
574 l += snprintf(&buf[l], count, "%s:", __func__);
575
576 l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg,
577 (cfg & MAC_CONFIG_REG_PS)
578 ? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
579 cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
580 cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");
581
582 flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
583 l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow,
584 (flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
585 : ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
586 : ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));
587
588 l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
589 l += snprintf(&buf[l], count - l, "\n");
590 return l;
591}
592
593
594/*
595 * table and functions to initialize controller registers
596 */
597
/* one controller register: init value plus sysfs display metadata */
struct reg_entry {
	unsigned int rdonly;	/* 1: display only, never written at init */
	unsigned int addr;	/* byte offset from mac_base */
	char *label;		/* name shown by qfec_reg_show() */
	unsigned int val;	/* value written by qfec_reg_init() */
};
604
/*
 * controller register init/display table; qfec_reg_init() writes the
 * rdonly==0 entries in this order, qfec_reg_show() dumps them all
 */
static struct reg_entry qfec_reg_tbl[] = {
	{ 0, BUS_MODE_REG, "BUS_MODE_REG", BUS_MODE_REG_DEFAULT },
	{ 0, AXI_BUS_MODE_REG, "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
	{ 0, AXI_STATUS_REG, "AXI_STATUS_REG", 0 },

	/* placeholder MAC address; overwritten via qfec_set_adr_regs() */
	{ 0, MAC_ADR_0_HIGH_REG, "MAC_ADR_0_HIGH_REG", 0x00000302 },
	{ 0, MAC_ADR_0_LOW_REG, "MAC_ADR_0_LOW_REG", 0x01350702 },

	{ 1, RX_DES_LST_ADR_REG, "RX_DES_LST_ADR_REG", 0 },
	{ 1, TX_DES_LST_ADR_REG, "TX_DES_LST_ADR_REG", 0 },
	{ 1, STATUS_REG, "STATUS_REG", 0 },
	{ 1, DEBUG_REG, "DEBUG_REG", 0 },

	{ 0, INTRP_EN_REG, "INTRP_EN_REG", QFEC_INTRP_SETUP},

	{ 1, CUR_HOST_TX_DES_REG, "CUR_HOST_TX_DES_REG", 0 },
	{ 1, CUR_HOST_RX_DES_REG, "CUR_HOST_RX_DES_REG", 0 },
	{ 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
	{ 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },

	{ 1, MAC_FR_FILTER_REG, "MAC_FR_FILTER_REG", 0 },

	{ 0, MAC_CONFIG_REG, "MAC_CONFIG_REG", MAC_CONFIG_REG_SPD_1G
						| MAC_CONFIG_REG_DM
						| MAC_CONFIG_REG_TE
						| MAC_CONFIG_REG_RE
						| MAC_CONFIG_REG_IPC },

	{ 1, INTRP_STATUS_REG, "INTRP_STATUS_REG", 0 },
	{ 1, INTRP_MASK_REG, "INTRP_MASK_REG", 0 },

	{ 0, OPER_MODE_REG, "OPER_MODE_REG", OPER_MODE_REG_DEFAULT },

	{ 1, GMII_ADR_REG, "GMII_ADR_REG", 0 },
	{ 1, GMII_DATA_REG, "GMII_DATA_REG", 0 },

	/* mask all MMC counter interrupts */
	{ 0, MMC_INTR_MASK_RX_REG, "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF },
	{ 0, MMC_INTR_MASK_TX_REG, "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF },

	{ 1, TS_HIGH_REG, "TS_HIGH_REG", 0 },
	{ 1, TS_LOW_REG, "TS_LOW_REG", 0 },

	{ 1, TS_HI_UPDT_REG, "TS_HI_UPDATE_REG", 0 },
	{ 1, TS_LO_UPDT_REG, "TS_LO_UPDATE_REG", 0 },
	{ 0, TS_SUB_SEC_INCR_REG, "TS_SUB_SEC_INCR_REG", 1 },
	{ 0, TS_CTL_REG, "TS_CTL_REG", TS_CTL_TSENALL
						| TS_CTL_TSCTRLSSR
						| TS_CTL_TSINIT
						| TS_CTL_TSENA },
};
655
656static void qfec_reg_init(struct qfec_priv *priv)
657{
658 struct reg_entry *p = qfec_reg_tbl;
659 int n = ARRAY_SIZE(qfec_reg_tbl);
660
661 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
662
663 for (; n--; p++) {
664 if (!p->rdonly)
665 qfec_reg_write(priv, p->addr, p->val);
666 }
667}
668
669/*
670 * display registers thru sysfs
671 */
672static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
673 char *buf)
674{
675 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
676 struct reg_entry *p = qfec_reg_tbl;
677 int n = ARRAY_SIZE(qfec_reg_tbl);
678 int l = 0;
679 int count = PAGE_SIZE;
680
681 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
682
683 for (; n--; p++) {
684 l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n",
685 (void *)priv->mac_base + p->addr, p->addr,
686 qfec_reg_read(priv, p->addr), p->label);
687 }
688
689 return l;
690}
691
692/*
693 * set the MAC-0 address
694 */
695static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
696{
697 uint32_t h = 0;
698 uint32_t l = 0;
699
700 h = h << 8 | addr[5];
701 h = h << 8 | addr[4];
702
703 l = l << 8 | addr[3];
704 l = l << 8 | addr[2];
705 l = l << 8 | addr[1];
706 l = l << 8 | addr[0];
707
708 qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
709 qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l);
710
711 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
712}
713
714/*
715 * set up the RX filter
716 */
717static void qfec_set_rx_mode(struct net_device *dev)
718{
719 struct qfec_priv *priv = netdev_priv(dev);
720 uint32_t filter_conf;
721 int index;
722
723 /* Clear address filter entries */
724 for (index = 1; index < MAC_ADR_MAX; ++index) {
725 qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), 0);
726 qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), 0);
727 }
728
729 if (dev->flags & IFF_PROMISC) {
730 /* Receive all frames */
731 filter_conf = MAC_FR_FILTER_RA;
732 } else if ((dev->flags & IFF_MULTICAST) == 0) {
733 /* Unicast filtering only */
734 filter_conf = MAC_FR_FILTER_HPF;
735 } else if ((netdev_mc_count(dev) > MAC_ADR_MAX - 1) ||
736 (dev->flags & IFF_ALLMULTI)) {
737 /* Unicast filtering is enabled, Pass all multicast frames */
738 filter_conf = MAC_FR_FILTER_HPF | MAC_FR_FILTER_PM;
739 } else {
740 struct netdev_hw_addr *ha;
741
742 /* Both unicast and multicast filtering are enabled */
743 filter_conf = MAC_FR_FILTER_HPF;
744
745 index = 1;
746
747 netdev_for_each_mc_addr(ha, dev) {
748 uint32_t high, low;
749
750 high = (1 << 31) | (ha->addr[5] << 8) | (ha->addr[4]);
751 low = (ha->addr[3] << 24) | (ha->addr[2] << 16) |
752 (ha->addr[1] << 8) | (ha->addr[0]);
753
754 qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), high);
755 qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), low);
756
757 index++;
758 }
759 }
760
761 qfec_reg_write(priv, MAC_FR_FILTER_REG, filter_conf);
762}
763
764/*
765 * reset the controller
766 */
767
768#define QFEC_RESET_TIMEOUT 10000
769 /* reset should always clear but did not w/o test/delay
770 * in RgMii mode. there is no spec'd max timeout
771 */
772
773static int qfec_hw_reset(struct qfec_priv *priv)
774{
775 int timeout = QFEC_RESET_TIMEOUT;
776
777 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
778
779 qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);
780
781 while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
782 if (timeout-- == 0) {
783 QFEC_LOG_ERR("%s: timeout\n", __func__);
784 return -ETIME;
785 }
786
787 /* there were problems resetting the controller
788 * in RGMII mode when there wasn't sufficient
789 * delay between register reads
790 */
791 usleep_range(100, 200);
792 }
793
794 return 0;
795}
796
797/*
798 * initialize controller
799 */
/*
 * bring the controller to a known configured state: soft reset,
 * program register defaults, install the TX/RX descriptor ring
 * addresses, clear pending interrupt status, and quiesce RGMII
 * interrupts on gmii-capable parts
 */
static int qfec_hw_init(struct qfec_priv *priv)
{
	int res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	res = qfec_hw_reset(priv);
	if (res)
		return res;

	qfec_reg_init(priv);

	/* config buf-desc locations */
	qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
	qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);

	/* clear interrupts -- NOTE(review): the value is built from
	 * INTRP_EN_REG_* enable masks; assumes these line up with the
	 * matching STATUS_REG write-to-clear bits — confirm vs qfec.h
	 */
	qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
		| INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);

	if (priv->mii.supports_gmii) {
		/* Clear RGMII (read-to-clear status) */
		qfec_reg_read(priv, SG_RG_SMII_STATUS_REG);
		/* Disable RGMII int */
		qfec_reg_write(priv, INTRP_MASK_REG, 1);
	}

	return res;
}
829
830/*
831 * en/disable controller
832 */
833static void qfec_hw_enable(struct qfec_priv *priv)
834{
835 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
836
837 qfec_reg_write(priv, OPER_MODE_REG,
838 qfec_reg_read(priv, OPER_MODE_REG)
839 | OPER_MODE_REG_ST | OPER_MODE_REG_SR);
840}
841
842static void qfec_hw_disable(struct qfec_priv *priv)
843{
844 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
845
846 qfec_reg_write(priv, OPER_MODE_REG,
847 qfec_reg_read(priv, OPER_MODE_REG)
848 & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
849}
850
851/*
852 * interface selection
853 */
/* clock/mux register values selecting one PHY interface mode */
struct intf_config {
	uint32_t intf_sel;	/* EMAC_PHY_INTF_SEL_REG value */
	uint32_t emac_ns;	/* EMAC_NS_REG value */
	uint32_t eth_x_en_ns;	/* ETH_X_EN_NS_REG value */
	uint32_t clkmux_sel;	/* EMAC_CLKMUX_SEL_REG value */
};
860
/* reverse-MII needs an inverted TX clock and both clkmux selects */
#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)

/* indexed by enum phy_intfc (INTFC_MII / INTFC_RGMII / INTFC_REVMII) */
static struct intf_config intf_config_tbl[] = {
	{ EMAC_PHY_INTF_SEL_MII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_RGMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
		CLKMUX_REVMII }
};
870
871/*
872 * emac clk register read and write functions
873 */
874static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
875{
876 return ioread32((void *) (priv->clk_base + reg));
877}
878
879static inline void qfec_clkreg_write(struct qfec_priv *priv,
880 uint32_t reg, uint32_t val)
881{
882 uint32_t addr = (uint32_t)priv->clk_base + reg;
883
884 QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
885 iowrite32(val, (void *)addr);
886}
887
888/*
889 * configure the PHY interface and clock routing and signal bits
890 */
/* PHY interface modes; values index intf_config_tbl[] */
enum phy_intfc {
	INTFC_MII = 0,
	INTFC_RGMII = 1,
	INTFC_REVMII = 2,
};
896
897static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
898{
899 struct intf_config *p;
900
901 QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);
902
903 if (intfc > INTFC_REVMII) {
904 QFEC_LOG_ERR("%s: range\n", __func__);
905 return -ENXIO;
906 }
907
908 p = &intf_config_tbl[intfc];
909
910 qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
911 qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns);
912 qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns);
913 qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel);
914
915 return 0;
916}
917
918/*
919 * display registers thru proc-fs
920 */
/* clock-block registers dumped by qfec_clk_reg_show() */
static struct qfec_clk_reg {
	uint32_t offset;	/* byte offset from clk_base */
	char *label;		/* display name */
} qfec_clk_regs[] = {
	{ ETH_MD_REG, "ETH_MD_REG" },
	{ ETH_NS_REG, "ETH_NS_REG" },
	{ ETH_X_EN_NS_REG, "ETH_X_EN_NS_REG" },
	{ EMAC_PTP_MD_REG, "EMAC_PTP_MD_REG" },
	{ EMAC_PTP_NS_REG, "EMAC_PTP_NS_REG" },
	{ EMAC_NS_REG, "EMAC_NS_REG" },
	{ EMAC_TX_FS_REG, "EMAC_TX_FS_REG" },
	{ EMAC_RX_FS_REG, "EMAC_RX_FS_REG" },
	{ EMAC_PHY_INTF_SEL_REG, "EMAC_PHY_INTF_SEL_REG" },
	{ EMAC_PHY_ADDR_REG, "EMAC_PHY_ADDR_REG" },
	{ EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" },
	{ EMAC_CLKMUX_SEL_REG, "EMAC_CLKMUX_SEL_REG" },
};
938
939static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
940 char *buf)
941{
942 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
943 struct qfec_clk_reg *p = qfec_clk_regs;
944 int n = ARRAY_SIZE(qfec_clk_regs);
945 int l = 0;
946 int count = PAGE_SIZE;
947
948 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
949
950 for (; n--; p++) {
951 l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n",
952 (void *)priv->clk_base + p->offset, p->offset,
953 qfec_clkreg_read(priv, p->offset), p->label);
954 }
955
956 return l;
957}
958
959/*
960 * speed selection
961 */
962
/* per-speed MAC/clock divider settings */
struct qfec_pll_cfg {
	uint32_t spd;		/* MAC_CONFIG_REG speed bits */
	uint32_t eth_md;	/* M [31:16], NOT 2*D [15:0] */
	uint32_t eth_ns;	/* NOT(M-N) [31:16], ctl bits [11:0] */
};
968
/* indexed by enum speed (SPD_10 / SPD_100 / SPD_1000) */
static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
	/* 2.5 MHz */
	{ MAC_CONFIG_REG_SPD_10, ETH_MD_M(1) | ETH_MD_2D_N(100),
		ETH_NS_NM(100-1)
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC },
	/* 25 MHz */
	{ MAC_CONFIG_REG_SPD_100, ETH_MD_M(1) | ETH_MD_2D_N(10),
		ETH_NS_NM(10-1)
		| ETH_NS_MCNTR_EN
		| ETH_NS_MCNTR_MODE_DUAL
		| ETH_NS_PRE_DIV(0)
		| CLK_SRC_PLL_EMAC },
	/* 125 MHz */
	{MAC_CONFIG_REG_SPD_1G, 0, ETH_NS_PRE_DIV(1)
		| CLK_SRC_PLL_EMAC },
};
988
/* link speeds; values index qfec_pll_cfg_tbl[] */
enum speed {
	SPD_10 = 0,
	SPD_100 = 1,
	SPD_1000 = 2,
};
994
995/*
996 * configure the PHY interface and clock routing and signal bits
997 */
/*
 * program MAC speed/duplex bits and the matching ethernet clock
 * dividers for the requested link speed
 *
 * spd: one of enum speed; dplx: non-zero selects full duplex.
 * Returns 0 on success, -ENODEV for an out-of-range speed.
 */
static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
	unsigned int dplx)
{
	struct qfec_priv *priv = netdev_priv(dev);
	struct qfec_pll_cfg *p;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);

	if (spd > SPD_1000) {
		QFEC_LOG_ERR("%s: range\n", __func__);
		return -ENODEV;
	}

	p = &qfec_pll_cfg_tbl[spd];

	/* set the MAC speed bits (read-modify-write, preserving the
	 * other config bits) */
	qfec_reg_write(priv, MAC_CONFIG_REG,
	(qfec_reg_read(priv, MAC_CONFIG_REG)
		& ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
			| p->spd | (dplx ? MAC_CONFIG_REG_DM : H_DPLX));

	/* then reprogram the ethernet clock dividers */
	qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);

	return 0;
}
1024
1025/*
1026 * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
1027 */
1028
/* PTP clock source settings (spd field unused here) */
static struct qfec_pll_cfg qfec_pll_ptp = {
	/* 19.2 MHz tcxo */
	0, 0, ETH_NS_PRE_DIV(0)
		| EMAC_PTP_NS_ROOT_EN
		| EMAC_PTP_NS_CLK_EN
		| CLK_SRC_TCXO
};
1036
1037#define PLLTEST_PAD_CFG 0x01E0
1038#define PLLTEST_PLL_7 0x3700
1039
1040#define CLKTEST_REG 0x01EC
1041#define CLKTEST_EMAC_RX 0x3fc07f7a
1042
/*
 * program the PTP timestamp clock dividers and route the clocks to
 * the HS/LS test ports for external verification; always returns 0
 */
static int qfec_ptp_cfg(struct qfec_priv *priv)
{
	struct qfec_pll_cfg *p = &qfec_pll_ptp;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
		__func__, p->eth_md, p->eth_ns);

	qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);

	/* configure HS/LS clk test ports to verify clks */
	qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX);
	qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);

	return 0;
}
1059
1060/*
1061 * MDIO operations
1062 */
1063
1064/*
1065 * wait reasonable amount of time for MDIO operation to complete, not busy
1066 */
1067static int qfec_mdio_busy(struct net_device *dev)
1068{
1069 int i;
1070
1071 for (i = 100; i > 0; i--) {
1072 if (!(qfec_reg_read(
1073 netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) {
1074 return 0;
1075 }
1076 udelay(1);
1077 }
1078
1079 return -ETIME;
1080}
1081
1082/*
1083 * initiate either a read or write MDIO operation
1084 */
1085
/*
 * initiate either a read or write MDIO operation and wait for it to
 * finish (wr non-zero selects a write).  Callers must hold mdio_lock
 * and, for writes, pre-load GMII_DATA_REG.  Returns 0 or -ETIME.
 */
static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
{
	struct qfec_priv *priv = netdev_priv(dev);
	int res = 0;

	/* insure phy not busy */
	res = qfec_mdio_busy(dev);
	if (res) {
		QFEC_LOG_ERR("%s: busy\n", __func__);
		goto done;
	}

	/* initiate operation: setting GB starts the transfer */
	qfec_reg_write(priv, GMII_ADR_REG,
		GMII_ADR_REG_ADR_SET(phy_id)
		| GMII_ADR_REG_REG_SET(reg)
		| GMII_ADR_REG_CSR_SET(priv->mdio_clk)
		| (wr ? GMII_ADR_REG_GW : 0)
		| GMII_ADR_REG_GB);

	/* wait for operation to complete */
	res = qfec_mdio_busy(dev);
	if (res)
		QFEC_LOG_ERR("%s: timeout\n", __func__);

done:
	return res;
}
1114
1115/*
1116 * read MDIO register
1117 */
/*
 * read an MDIO register under mdio_lock; returns the 16-bit register
 * value, or the negative errno from qfec_mdio_oper() on timeout
 * (callers treating the result purely as a value won't see the
 * distinction — note for reviewers)
 */
static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct qfec_priv *priv = netdev_priv(dev);
	int res = 0;
	unsigned long flags;

	/* irqsave: this path is also used from timer context */
	spin_lock_irqsave(&priv->mdio_lock, flags);

	res = qfec_mdio_oper(dev, phy_id, reg, 0);
	if (res) {
		QFEC_LOG_ERR("%s: oper\n", __func__);
		goto done;
	}

	res = qfec_reg_read(priv, GMII_DATA_REG);
	QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
		__func__, reg, res);

done:
	spin_unlock_irqrestore(&priv->mdio_lock, flags);
	return res;
}
1140
1141/*
1142 * write MDIO register
1143 */
/*
 * write an MDIO register under mdio_lock; the data register is
 * loaded first, then the address register kicks off the write.
 * Failures are logged but not reported to the caller.
 */
static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
	int val)
{
	struct qfec_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->mdio_lock, flags);

	QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
		__func__, reg, val);

	qfec_reg_write(priv, GMII_DATA_REG, val);

	if (qfec_mdio_oper(dev, phy_id, reg, 1))
		QFEC_LOG_ERR("%s: oper\n", __func__);

	spin_unlock_irqrestore(&priv->mdio_lock, flags);
}
1162
1163/*
1164 * MDIO show
1165 */
1166static int qfec_mdio_show(struct device *dev, struct device_attribute *attr,
1167 char *buf)
1168{
1169 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1170 int n;
1171 int l = 0;
1172 int count = PAGE_SIZE;
1173
1174 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
1175
1176 for (n = 0; n < MAX_MDIO_REG; n++) {
1177 if (!(n % 8))
1178 l += snprintf(&buf[l], count - l, "\n %02x: ", n);
1179
1180 l += snprintf(&buf[l], count - l, " %04x",
1181 qfec_mdio_read(to_net_dev(dev), priv->phy_id, n));
1182 }
1183 l += snprintf(&buf[l], count - l, "\n");
1184
1185 return l;
1186}
1187
1188/*
1189 * get auto-negotiation results
1190 */
/*
 * link-partner/advertised ability masks for decoding auto-negotiation.
 * NOTE(review): the original defined QFEC_100 with LPA_100HALF listed
 * twice; the second occurrence was almost certainly meant to be
 * LPA_100BASE4 (matching QFEC_100_FD).  Also note 100BASE-T4 is a
 * half-duplex medium, so its presence in QFEC_100_FD looks suspect —
 * TODO confirm against the PHY datasheet.
 */
#define QFEC_100        (LPA_100HALF | LPA_100FULL | LPA_100BASE4)
#define QFEC_100_FD     (LPA_100FULL | LPA_100BASE4)
#define QFEC_10         (LPA_10HALF | LPA_10FULL)
#define QFEC_10_FD      LPA_10FULL
1195
/*
 * read auto-negotiation results from the PHY, derive the common
 * speed/duplex, and program the MAC pause (flow-control) bits.
 * *spd/*dplx are written only when a 1000/100/10 match is found, so
 * callers must pre-initialize them with defaults.
 */
static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
{
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
	uint32_t lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
	uint32_t mastCtrl = qfec_mdio_read(dev, priv->phy_id, MII_CTRL1000);
	uint32_t mastStat = qfec_mdio_read(dev, priv->phy_id, MII_STAT1000);
	uint32_t anExp = qfec_mdio_read(dev, priv->phy_id, MII_EXPANSION);
	uint32_t status = advert & lpa;	/* abilities common to both ends */
	uint32_t flow;

	if (priv->mii.supports_gmii) {
		/* 1000 Mb/s: expansion bits must show a next-page capable
		 * partner, plus matching local ctrl and partner status */
		if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK)
			&& (mastCtrl & ADVERTISE_1000FULL)
			&& (mastStat & LPA_1000FULL)) {
			*spd = SPD_1000;
			*dplx = F_DPLX;
			goto pause;
		}

		else if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK)
			&& (mastCtrl & ADVERTISE_1000HALF)
			&& (mastStat & LPA_1000HALF)) {
			*spd = SPD_1000;
			*dplx = H_DPLX;
			goto pause;
		}
	}

	/* mii speeds */
	if (status & QFEC_100) {
		*spd = SPD_100;
		*dplx = status & QFEC_100_FD ? F_DPLX : H_DPLX;
	}

	else if (status & QFEC_10) {
		*spd = SPD_10;
		*dplx = status & QFEC_10_FD ? F_DPLX : H_DPLX;
	}

	/* check pause: symmetric if both ends advertise PAUSE_CAP,
	 * otherwise resolve asymmetric pause direction */
pause:
	flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
	flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);

	if (status & ADVERTISE_PAUSE_CAP) {
		flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
	} else if (status & ADVERTISE_PAUSE_ASYM) {
		if (lpa & ADVERTISE_PAUSE_CAP)
			flow |= FLOW_CONTROL_TFE;
		else if (advert & ADVERTISE_PAUSE_CAP)
			flow |= FLOW_CONTROL_RFE;
	}

	qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
}
1252
1253/*
1254 * monitor phy status, and process auto-neg results when changed
1255 */
1256
1257static void qfec_phy_monitor(unsigned long data)
1258{
1259 struct net_device *dev = (struct net_device *) data;
1260 struct qfec_priv *priv = netdev_priv(dev);
1261 unsigned int spd = H_DPLX;
1262 unsigned int dplx = F_DPLX;
1263
1264 mod_timer(&priv->phy_tmr, jiffies + HZ);
1265
1266 if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) {
1267 qfec_get_an(dev, &spd, &dplx);
1268 qfec_speed_cfg(dev, spd, dplx);
1269 QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
1270 __func__, spd, dplx);
1271
1272 netif_carrier_on(dev);
1273 }
1274
1275 else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) {
1276 QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
1277 netif_carrier_off(dev);
1278 }
1279}
1280
1281/*
1282 * dealloc buffer descriptor memory
1283 */
1284
1285static void qfec_mem_dealloc(struct net_device *dev)
1286{
1287 struct qfec_priv *priv = netdev_priv(dev);
1288
1289 dma_free_coherent(&dev->dev,
1290 priv->bd_size, priv->bd_base, priv->tbd_dma);
1291 priv->bd_base = 0;
1292}
1293
1294/*
1295 * allocate shared device memory for TX/RX buf-desc (and buffers)
1296 */
1297
1298static int qfec_mem_alloc(struct net_device *dev)
1299{
1300 struct qfec_priv *priv = netdev_priv(dev);
1301
1302 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1303
1304 priv->bd_size =
1305 (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
1306
1307 priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
1308 if (!priv->p_tbd) {
1309 QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
1310 return -ENOMEM;
1311 }
1312
1313 priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
1314 if (!priv->p_rbd) {
1315 QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
1316 return -ENOMEM;
1317 }
1318
1319 /* alloc mem for buf-desc, if not already alloc'd */
1320 if (!priv->bd_base) {
1321 priv->bd_base = dma_alloc_coherent(&dev->dev,
1322 priv->bd_size, &priv->tbd_dma,
1323 GFP_KERNEL | __GFP_DMA);
1324 }
1325
1326 if (!priv->bd_base) {
1327 QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
1328 return -ENOMEM;
1329 }
1330
1331 priv->rbd_dma = priv->tbd_dma
1332 + (priv->n_tbd * sizeof(struct qfec_buf_desc));
1333
1334 QFEC_LOG(QFEC_LOG_DBG,
1335 " %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
1336 __func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
1337
1338 return 0;
1339}
1340
1341/*
1342 * display buffer descriptors
1343 */
1344
/*
 * format one buffer descriptor as a single line (no trailing newline)
 * Returns the snprintf() result: the length that would have been
 * written had 'size' been large enough.
 */
static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
{
	return snprintf(buf, size,
		"%8p: %08x %08x %8p %8p %8p %8p %8p %x",
		p_bd, qfec_bd_status_get(p_bd),
		qfec_bd_ctl_get(p_bd), qfec_bd_pbuf_get(p_bd),
		qfec_bd_next_get(p_bd), qfec_bd_skbuf_get(p_bd),
		qfec_bd_virt_get(p_bd), qfec_bd_phys_get(p_bd),
		qfec_bd_last_bd(p_bd));
}
1355
1356static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
1357 struct ring *p_ring, char *label)
1358{
1359 int l = 0;
1360 int n;
1361
1362 QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
1363
1364 l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
1365 if (!p_bd)
1366 return l;
1367
1368 n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
1369
1370 for (n = 0; n < n_bd; n++, p_bd++) {
1371 l += qfec_bd_fmt(&buf[l], count - l, p_bd);
1372 l += snprintf(&buf[l], count - l, "%s%s\n",
1373 (qfec_ring_head(p_ring) == n ? " < h" : ""),
1374 (qfec_ring_tail(p_ring) == n ? " < t" : ""));
1375 }
1376
1377 return l;
1378}
1379
1380/*
1381 * display TX BDs
1382 */
1383static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
1384 char *buf)
1385{
1386 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1387 int count = PAGE_SIZE;
1388
1389 return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
1390 &priv->ring_tbd, "TX");
1391}
1392
1393/*
1394 * display RX BDs
1395 */
1396static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
1397 char *buf)
1398{
1399 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1400 int count = PAGE_SIZE;
1401
1402 return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
1403 &priv->ring_rbd, "RX");
1404}
1405
1406/*
1407 * process timestamp values
1408 * The pbuf and next fields of the buffer descriptors are overwritten
1409 * with the timestamp high and low register values.
1410 *
1411 * The low register is incremented by the value in the subsec_increment
1412 * register and overflows at 0x8000 0000 causing the high register to
1413 * increment.
1414 *
1415 * The subsec_increment register is recommended to be set to the number
1416 * of nanosec corresponding to each clock tic, scaled by 2^31 / 10^9
1417 * (e.g. 40 * 2^32 / 10^9 = 85.9, or 86 for 25 MHz). However, the
1418 * rounding error in this case will result in a 1 sec error / ~14 mins.
1419 *
1420 * An alternate approach is used. The subsec_increment is set to 1,
1421 * and the concatenation of the 2 timestamp registers used to count
1422 * clock tics. The 63-bit result is manipulated to determine the number
1423 * of sec and ns.
1424 */
1425
1426/*
1427 * convert 19.2 MHz clock tics into sec/ns
1428 */
1429#define TS_LOW_REG_BITS 31
1430
1431#define MILLION 1000000UL
1432#define BILLION 1000000000UL
1433
1434#define F_CLK 19200000UL
1435#define F_CLK_PRE_SC 24
1436#define F_CLK_INV_Q 56
1437#define F_CLK_INV (((unsigned long long)1 << F_CLK_INV_Q) / F_CLK)
1438#define F_CLK_TO_NS_Q 25
1439#define F_CLK_TO_NS \
1440 (((((unsigned long long)1<<F_CLK_TO_NS_Q)*BILLION)+(F_CLK-1))/F_CLK)
1441#define US_TO_F_CLK_Q 20
1442#define US_TO_F_CLK \
1443 (((((unsigned long long)1<<US_TO_F_CLK_Q)*F_CLK)+(MILLION-1))/MILLION)
1444
1445static inline void qfec_get_sec(uint64_t *cnt,
1446 uint32_t *sec, uint32_t *ns)
1447{
1448 unsigned long long t;
1449 unsigned long long subsec;
1450
1451 t = *cnt >> F_CLK_PRE_SC;
1452 t *= F_CLK_INV;
1453 t >>= F_CLK_INV_Q - F_CLK_PRE_SC;
1454 *sec = t;
1455
1456 t = *cnt - (t * F_CLK);
1457 subsec = t;
1458
1459 if (subsec >= F_CLK) {
1460 subsec -= F_CLK;
1461 *sec += 1;
1462 }
1463
1464 subsec *= F_CLK_TO_NS;
1465 subsec >>= F_CLK_TO_NS_Q;
1466 *ns = subsec;
1467}
1468
1469/*
1470 * read ethernet timestamp registers, pass up raw register values
1471 * and values converted to sec/ns
1472 */
1473static void qfec_read_timestamp(struct buf_desc *p_bd,
1474 struct skb_shared_hwtstamps *ts)
1475{
1476 unsigned long long cnt;
1477 unsigned int sec;
1478 unsigned int subsec;
1479
1480 cnt = (unsigned long)qfec_bd_next_get(p_bd);
1481 cnt <<= TS_LOW_REG_BITS;
1482 cnt |= (unsigned long)qfec_bd_pbuf_get(p_bd);
1483
1484 /* report raw counts as concatenated 63 bits */
1485 sec = cnt >> 32;
1486 subsec = cnt & 0xffffffff;
1487
1488 ts->hwtstamp = ktime_set(sec, subsec);
1489
1490 /* translate counts to sec and ns */
1491 qfec_get_sec(&cnt, &sec, &subsec);
1492
1493 ts->syststamp = ktime_set(sec, subsec);
1494}
1495
1496/*
1497 * capture the current system time in the timestamp registers
1498 */
1499static int qfec_cmd(struct device *dev, struct device_attribute *attr,
1500 const char *buf, size_t count)
1501{
1502 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1503 struct timeval tv;
1504
1505 if (!strncmp(buf, "setTs", 5)) {
1506 unsigned long long cnt;
1507 uint32_t ts_hi;
1508 uint32_t ts_lo;
1509 unsigned long long subsec;
1510
1511 do_gettimeofday(&tv);
1512
1513 /* convert raw sec/usec to ns */
1514 subsec = tv.tv_usec;
1515 subsec *= US_TO_F_CLK;
1516 subsec >>= US_TO_F_CLK_Q;
1517
1518 cnt = tv.tv_sec;
1519 cnt *= F_CLK;
1520 cnt += subsec;
1521
1522 ts_hi = cnt >> 31;
1523 ts_lo = cnt & 0x7FFFFFFF;
1524
1525 qfec_reg_write(priv, TS_HI_UPDT_REG, ts_hi);
1526 qfec_reg_write(priv, TS_LO_UPDT_REG, ts_lo);
1527
1528 qfec_reg_write(priv, TS_CTL_REG,
1529 qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSINIT);
1530 } else
1531 pr_err("%s: unknown cmd, %s.\n", __func__, buf);
1532
1533 return strnlen(buf, count);
1534}
1535
1536/*
1537 * display ethernet tstamp and system time
1538 */
1539static int qfec_tstamp_show(struct device *dev, struct device_attribute *attr,
1540 char *buf)
1541{
1542 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
1543 int count = PAGE_SIZE;
1544 int l;
1545 struct timeval tv;
1546 unsigned long long cnt;
1547 uint32_t sec;
1548 uint32_t ns;
1549 uint32_t ts_hi;
1550 uint32_t ts_lo;
1551
1552 /* insure that ts_hi didn't increment during read */
1553 do {
1554 ts_hi = qfec_reg_read(priv, TS_HIGH_REG);
1555 ts_lo = qfec_reg_read(priv, TS_LOW_REG);
1556 } while (ts_hi != qfec_reg_read(priv, TS_HIGH_REG));
1557
1558 cnt = ts_hi;
1559 cnt <<= TS_LOW_REG_BITS;
1560 cnt |= ts_lo;
1561
1562 do_gettimeofday(&tv);
1563
1564 ts_hi = cnt >> 32;
1565 ts_lo = cnt & 0xffffffff;
1566
1567 qfec_get_sec(&cnt, &sec, &ns);
1568
1569 l = snprintf(buf, count,
1570 "%12u.%09u sec 0x%08x 0x%08x tstamp %12u.%06u time-of-day\n",
1571 sec, ns, ts_hi, ts_lo, (int)tv.tv_sec, (int)tv.tv_usec);
1572
1573 return l;
1574}
1575
1576/*
1577 * free transmitted skbufs from buffer-descriptor no owned by HW
1578 */
1579static int qfec_tx_replenish(struct net_device *dev)
1580{
1581 struct qfec_priv *priv = netdev_priv(dev);
1582 struct ring *p_ring = &priv->ring_tbd;
1583 struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
1584 struct sk_buff *skb;
1585 unsigned long flags;
1586
1587 CNTR_INC(priv, tx_replenish);
1588
1589 spin_lock_irqsave(&priv->xmit_lock, flags);
1590
1591 while (!qfec_ring_empty(p_ring)) {
1592 if (qfec_bd_own(p_bd))
1593 break; /* done for now */
1594
1595 skb = qfec_bd_skbuf_get(p_bd);
1596 if (unlikely(skb == NULL)) {
1597 QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
1598 CNTR_INC(priv, tx_skb_null);
1599 break;
1600 }
1601
1602 qfec_reg_write(priv, STATUS_REG,
1603 STATUS_REG_TU | STATUS_REG_TI);
1604
1605 /* retrieve timestamp if requested */
1606 if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) {
1607 CNTR_INC(priv, ts_tx_rtn);
1608 qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
1609 skb_tstamp_tx(skb, skb_hwtstamps(skb));
1610 }
1611
1612 /* update statistics before freeing skb */
1613 priv->stats.tx_packets++;
1614 priv->stats.tx_bytes += skb->len;
1615
1616 dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
1617 skb->len, DMA_TO_DEVICE);
1618
1619 dev_kfree_skb_any(skb);
1620 qfec_bd_skbuf_set(p_bd, NULL);
1621
1622 qfec_ring_tail_adv(p_ring);
1623 p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
1624 }
1625
1626 spin_unlock_irqrestore(&priv->xmit_lock, flags);
1627
1628 qfec_queue_start(dev);
1629
1630 return 0;
1631}
1632
1633/*
1634 * clear ownership bits of all TX buf-desc and release the sk-bufs
1635 */
1636static void qfec_tx_timeout(struct net_device *dev)
1637{
1638 struct qfec_priv *priv = netdev_priv(dev);
1639 struct buf_desc *bd = priv->p_tbd;
1640 int n;
1641
1642 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1643 CNTR_INC(priv, tx_timeout);
1644
1645 for (n = 0; n < priv->n_tbd; n++, bd++)
1646 qfec_bd_own_clr(bd);
1647
1648 qfec_tx_replenish(dev);
1649}
1650
1651/*
1652 * rx() - process a received frame
1653 */
1654static void qfec_rx_int(struct net_device *dev)
1655{
1656 struct qfec_priv *priv = netdev_priv(dev);
1657 struct ring *p_ring = &priv->ring_rbd;
1658 struct buf_desc *p_bd = priv->p_latest_rbd;
1659 uint32_t desc_status;
1660 uint32_t mis_fr_reg;
1661
1662 desc_status = qfec_bd_status_get(p_bd);
1663 mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);
1664
1665 CNTR_INC(priv, rx_int);
1666
1667 /* check that valid interrupt occurred */
1668 if (unlikely(desc_status & BUF_OWN))
1669 return;
1670
1671 /* accumulate missed-frame count (reg reset when read) */
1672 priv->stats.rx_missed_errors += mis_fr_reg
1673 & MIS_FR_REG_MISS_CNT;
1674
1675 /* process all unowned frames */
1676 while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) {
1677 struct sk_buff *skb;
1678 struct buf_desc *p_bd_next;
1679
1680 skb = qfec_bd_skbuf_get(p_bd);
1681
1682 if (unlikely(skb == NULL)) {
1683 QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
1684 CNTR_INC(priv, rx_skb_null);
1685 break;
1686 }
1687
1688 /* cache coherency before skb->data is accessed */
1689 dma_unmap_single(&dev->dev,
1690 (dma_addr_t) qfec_bd_phys_get(p_bd),
1691 ETH_BUF_SIZE, DMA_FROM_DEVICE);
1692 prefetch(skb->data);
1693
1694 if (unlikely(desc_status & BUF_RX_ES)) {
1695 priv->stats.rx_dropped++;
1696 CNTR_INC(priv, rx_dropped);
1697 dev_kfree_skb(skb);
1698 } else {
1699 qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
1700
1701 skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);
1702
1703 if (priv->state & timestamping) {
1704 CNTR_INC(priv, ts_rec);
1705 qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
1706 }
1707
1708 /* update statistics before freeing skb */
1709 priv->stats.rx_packets++;
1710 priv->stats.rx_bytes += skb->len;
1711
1712 skb->dev = dev;
1713 skb->protocol = eth_type_trans(skb, dev);
1714 skb->ip_summed = CHECKSUM_UNNECESSARY;
1715
1716 if (NET_RX_DROP == netif_rx(skb)) {
1717 priv->stats.rx_dropped++;
1718 CNTR_INC(priv, rx_dropped);
1719 }
1720 CNTR_INC(priv, netif_rx_cntr);
1721 }
1722
1723 if (p_bd != priv->p_ending_rbd)
1724 p_bd_next = p_bd + 1;
1725 else
1726 p_bd_next = priv->p_rbd;
1727 desc_status = qfec_bd_status_get(p_bd_next);
1728
1729 qfec_bd_skbuf_set(p_bd, NULL);
1730
1731 qfec_ring_head_adv(p_ring);
1732 p_bd = p_bd_next;
1733 }
1734
1735 priv->p_latest_rbd = p_bd;
1736
1737 /* replenish bufs */
1738 while (!qfec_ring_empty(p_ring)) {
1739 if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
1740 break;
1741 qfec_ring_tail_adv(p_ring);
1742 }
1743
1744 qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
1745}
1746
1747/*
1748 * isr() - interrupt service routine
1749 * determine cause of interrupt and invoke/schedule appropriate
1750 * processing or error handling
1751 */
/* count an error cause when its status bit is set; do-while(0) keeps
 * the conditional self-contained (the bare 'if' form mis-binds a
 * following 'else' and breaks if the macro gains statements), and the
 * tested arguments are parenthesized against operator-precedence
 * surprises at the call site
 */
#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
	do { \
		if ((status) & (interrupt)) \
			CNTR_INC(priv, cntr); \
	} while (0)
1755
/*
 * interrupt handler: classify the causes latched in STATUS_REG,
 * dispatch RX/TX processing, and acknowledge the handled bits
 */
static irqreturn_t qfec_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct qfec_priv *priv = netdev_priv(dev);
	uint32_t status = qfec_reg_read(priv, STATUS_REG);
	uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);

	/* abnormal interrupt */
	if (status & STATUS_REG_AIS) {
		QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
			__func__, status);

		/* bump the per-cause diagnostic counters */
		ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail);
		ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);

		ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
		ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
		ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);

		ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
		ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
		ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);

		int_bits |= STATUS_REG_AIS_BITS;
		CNTR_INC(priv, abnorm_int);
	}

	if (status & STATUS_REG_NIS)
		CNTR_INC(priv, norm_int);

	/* receive interrupt */
	if (status & STATUS_REG_RI) {
		CNTR_INC(priv, rx_isr);
		qfec_rx_int(dev);
	}

	/* transmit interrupt */
	if (status & STATUS_REG_TI) {
		CNTR_INC(priv, tx_isr);
		qfec_tx_replenish(dev);
	}

	/* gmac interrupt; NOTE(review): the SMII status read appears to
	 * act as the acknowledge - confirm against the GMAC datasheet
	 */
	if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) {
		status &= ~(STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI);
		CNTR_INC(priv, gmac_isr);
		int_bits |= STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI;
		qfec_reg_read(priv, SG_RG_SMII_STATUS_REG);
	}

	/* clear interrupts */
	qfec_reg_write(priv, STATUS_REG, int_bits);
	CNTR_INC(priv, isr);

	return IRQ_HANDLED;
}
1814
1815/*
1816 * open () - register system resources (IRQ, DMA, ...)
1817 * turn on HW, perform device setup.
1818 */
1819static int qfec_open(struct net_device *dev)
1820{
1821 struct qfec_priv *priv = netdev_priv(dev);
1822 struct buf_desc *p_bd;
1823 struct ring *p_ring;
1824 struct qfec_buf_desc *p_desc;
1825 int n;
1826 int res = 0;
1827
1828 QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
1829
1830 if (!dev) {
1831 res = -EINVAL;
1832 goto err;
1833 }
1834
1835 /* allocate TX/RX buffer-descriptors and buffers */
1836
1837 res = qfec_mem_alloc(dev);
1838 if (res)
1839 goto err;
1840
1841 /* initialize TX */
1842 p_desc = priv->bd_base;
1843
1844 for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
1845 p_bd->p_desc = p_desc++;
1846
1847 if (n == (priv->n_tbd - 1))
1848 qfec_bd_last_bd_set(p_bd);
1849
1850 qfec_bd_own_clr(p_bd); /* clear ownership */
1851 }
1852
1853 qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);
1854
1855 priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
1856 if (priv->tx_ic_mod == 0)
1857 priv->tx_ic_mod = 1;
1858
1859 /* initialize RX buffer descriptors and allocate sk_bufs */
1860 p_ring = &priv->ring_rbd;
1861 qfec_ring_init(p_ring, priv->n_rbd, 0);
1862 qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);
1863
1864 for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
1865 p_bd->p_desc = p_desc++;
1866
1867 if (qfec_rbd_init(dev, p_bd))
1868 break;
1869 qfec_ring_tail_adv(p_ring);
1870 }
1871
1872 priv->p_latest_rbd = priv->p_rbd;
1873 priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;
1874
1875 /* config ptp clock */
1876 qfec_ptp_cfg(priv);
1877
1878 /* configure PHY - must be set before reset/hw_init */
1879 priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);
1880 if (priv->mii.supports_gmii) {
1881 QFEC_LOG_ERR("%s: RGMII\n", __func__);
1882 qfec_intf_sel(priv, INTFC_RGMII);
1883 } else {
1884 QFEC_LOG_ERR("%s: MII\n", __func__);
1885 qfec_intf_sel(priv, INTFC_MII);
1886 }
1887
1888 /* initialize controller after BDs allocated */
1889 res = qfec_hw_init(priv);
1890 if (res)
1891 goto err1;
1892
1893 /* get/set (primary) MAC address */
1894 qfec_set_adr_regs(priv, dev->dev_addr);
1895 qfec_set_rx_mode(dev);
1896
1897 /* start phy monitor */
1898 QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
1899 netif_carrier_off(priv->net_dev);
1900 setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
1901 mod_timer(&priv->phy_tmr, jiffies + HZ);
1902
1903 /* driver supports AN capable PHY only */
1904 qfec_mdio_write(dev, priv->phy_id, MII_BMCR, BMCR_RESET);
1905 res = (BMCR_ANENABLE|BMCR_ANRESTART);
1906 qfec_mdio_write(dev, priv->phy_id, MII_BMCR, res);
1907
1908 /* initialize interrupts */
1909 QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
1910 res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
1911 if (res)
1912 goto err1;
1913
1914 /* enable controller */
1915 qfec_hw_enable(priv);
1916 netif_start_queue(dev);
1917
1918 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
1919 mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));
1920
1921 QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
1922 return 0;
1923
1924err1:
1925 qfec_mem_dealloc(dev);
1926err:
1927 QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
1928 return res;
1929}
1930
1931/*
1932 * stop() - "reverse operations performed at open time"
1933 */
1934static int qfec_stop(struct net_device *dev)
1935{
1936 struct qfec_priv *priv = netdev_priv(dev);
1937 struct buf_desc *p_bd;
1938 struct sk_buff *skb;
1939 int n;
1940
1941 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1942
1943 del_timer_sync(&priv->phy_tmr);
1944
1945 qfec_hw_disable(priv);
1946 qfec_queue_stop(dev);
1947 free_irq(dev->irq, dev);
1948
1949 /* free all pending sk_bufs */
1950 for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
1951 skb = qfec_bd_skbuf_get(p_bd);
1952 if (skb)
1953 dev_kfree_skb(skb);
1954 }
1955
1956 for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
1957 skb = qfec_bd_skbuf_get(p_bd);
1958 if (skb)
1959 dev_kfree_skb(skb);
1960 }
1961
1962 qfec_mem_dealloc(dev);
1963
1964 QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
1965
1966 return 0;
1967}
1968
1969static int qfec_set_config(struct net_device *dev, struct ifmap *map)
1970{
1971 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
1972 return 0;
1973}
1974
1975/*
1976 * pass data from skbuf to buf-desc
1977 */
1978static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
1979{
1980 struct qfec_priv *priv = netdev_priv(dev);
1981 struct ring *p_ring = &priv->ring_tbd;
1982 struct buf_desc *p_bd;
1983 uint32_t ctrl = 0;
1984 int ret = NETDEV_TX_OK;
1985 unsigned long flags;
1986
1987 CNTR_INC(priv, xmit);
1988
1989 spin_lock_irqsave(&priv->xmit_lock, flags);
1990
1991 /* If there is no room, on the ring try to free some up */
1992 if (qfec_ring_room(p_ring) == 0)
1993 qfec_tx_replenish(dev);
1994
1995 /* stop queuing if no resources available */
1996 if (qfec_ring_room(p_ring) == 0) {
1997 qfec_queue_stop(dev);
1998 CNTR_INC(priv, tx_no_resource);
1999
2000 ret = NETDEV_TX_BUSY;
2001 goto done;
2002 }
2003
2004 /* locate and save *sk_buff */
2005 p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
2006 qfec_bd_skbuf_set(p_bd, skb);
2007
2008 /* set DMA ptr to sk_buff data and write cache to memory */
2009 qfec_bd_pbuf_set(p_bd, (void *)
2010 dma_map_single(&dev->dev,
2011 (void *)skb->data, skb->len, DMA_TO_DEVICE));
2012
2013 ctrl = skb->len;
2014 if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
2015 ctrl |= BUF_TX_IC; /* interrupt on complete */
2016
2017 /* check if timestamping enabled and requested */
2018 if (priv->state & timestamping) {
2019 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2020 CNTR_INC(priv, ts_tx_en);
2021 ctrl |= BUF_TX_IC; /* interrupt on complete */
2022 ctrl |= BUF_TX_TTSE; /* enable timestamp */
2023 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2024 }
2025 }
2026
2027 if (qfec_bd_last_bd(p_bd))
2028 ctrl |= BUF_RX_RER;
2029
2030 /* no gather, no multi buf frames */
2031 ctrl |= BUF_TX_FS | BUF_TX_LS; /* 1st and last segment */
2032
2033 qfec_bd_ctl_wr(p_bd, ctrl);
2034 qfec_bd_status_set(p_bd, BUF_OWN);
2035
2036 qfec_ring_head_adv(p_ring);
2037 qfec_reg_write(priv, TX_POLL_DEM_REG, 1); /* poll */
2038
2039done:
2040 spin_unlock_irqrestore(&priv->xmit_lock, flags);
2041
2042 return ret;
2043}
2044
2045static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2046{
2047 struct qfec_priv *priv = netdev_priv(dev);
2048 struct hwtstamp_config *cfg = (struct hwtstamp_config *) ifr;
2049
2050 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2051
2052 if (cmd == SIOCSHWTSTAMP) {
2053 CNTR_INC(priv, ts_ioctl);
2054 QFEC_LOG(QFEC_LOG_DBG,
2055 "%s: SIOCSHWTSTAMP - %x flags %x tx %x rx\n",
2056 __func__, cfg->flags, cfg->tx_type, cfg->rx_filter);
2057
2058 cfg->flags = 0;
2059 cfg->tx_type = HWTSTAMP_TX_ON;
2060 cfg->rx_filter = HWTSTAMP_FILTER_ALL;
2061
2062 priv->state |= timestamping;
2063 qfec_reg_write(priv, TS_CTL_REG,
2064 qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);
2065
2066 return 0;
2067 }
2068
2069 return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
2070}
2071
2072static struct net_device_stats *qfec_get_stats(struct net_device *dev)
2073{
2074 struct qfec_priv *priv = netdev_priv(dev);
2075
2076 QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
2077
2078 priv->stats.multicast = qfec_reg_read(priv, NUM_MULTCST_FRM_RCVD_G);
2079
2080 return &priv->stats;
2081}
2082
2083/*
2084 * accept new mac address
2085 */
2086static int qfec_set_mac_address(struct net_device *dev, void *p)
2087{
2088 struct qfec_priv *priv = netdev_priv(dev);
2089 struct sockaddr *addr = p;
2090
2091 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2092
2093 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2094
2095 qfec_set_adr_regs(priv, dev->dev_addr);
2096
2097 return 0;
2098}
2099
2100/*
2101 * read discontinuous MAC address from corrected fuse memory region
2102 */
2103
2104static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
2105{
2106 static int offset[] = { 0, 1, 2, 3, 4, 8 };
2107 int n;
2108
2109 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2110
2111 for (n = 0; n < nBytes; n++)
2112 buf[n] = ioread8(mac_base + offset[n]);
2113
2114 /* check that MAC programmed */
2115 if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) {
2116 QFEC_LOG_ERR("%s: null MAC address\n", __func__);
2117 return -ENODATA;
2118 }
2119
2120 return 0;
2121}
2122
2123/*
2124 * static definition of driver functions
2125 */
static const struct net_device_ops qfec_netdev_ops = {
	/* lifecycle and datapath */
	.ndo_open = qfec_open,
	.ndo_stop = qfec_stop,
	.ndo_start_xmit = qfec_xmit,

	/* control operations */
	.ndo_do_ioctl = qfec_do_ioctl,
	.ndo_tx_timeout = qfec_tx_timeout,
	.ndo_set_mac_address = qfec_set_mac_address,
	.ndo_set_rx_mode = qfec_set_rx_mode,

	/* generic ethernet helpers */
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,

	.ndo_get_stats = qfec_get_stats,
	.ndo_set_config = qfec_set_config,
};
2142
2143/*
2144 * ethtool functions
2145 */
2146
2147static int qfec_nway_reset(struct net_device *dev)
2148{
2149 struct qfec_priv *priv = netdev_priv(dev);
2150 return mii_nway_restart(&priv->mii);
2151}
2152
2153/*
2154 * speed, duplex, auto-neg settings
2155 */
2156static void qfec_ethtool_getpauseparam(struct net_device *dev,
2157 struct ethtool_pauseparam *pp)
2158{
2159 struct qfec_priv *priv = netdev_priv(dev);
2160 u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
2161 u32 advert;
2162
2163 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2164
2165 /* report current settings */
2166 pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
2167 pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
2168
2169 /* report if pause is being advertised */
2170 advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
2171 pp->autoneg =
2172 (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2173}
2174
/*
 * program pause advertisement bits into MII_ADVERTISE; takes effect
 * at the next auto-negotiation
 */
static int qfec_ethtool_setpauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pp)
{
	struct qfec_priv *priv = netdev_priv(dev);
	u32 advert;

	QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
		pp->autoneg, pp->rx_pause, pp->tx_pause);

	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
	advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* rx_pause advertises symmetric + asymmetric, tx_pause alone
	 * advertises asymmetric only, and neither advertises symmetric
	 * only.
	 * NOTE(review): the original comment claimed that autoneg with
	 * neither rx nor tx set should enable BOTH pause bits, but the
	 * code sets only ADVERTISE_PAUSE_CAP in that case - confirm
	 * which behavior is intended.
	 */
	if (pp->autoneg) {
		if (pp->rx_pause)
			advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
		else if (pp->tx_pause)
			advert |= ADVERTISE_PAUSE_ASYM;
		else
			advert |= ADVERTISE_PAUSE_CAP;
	}

	qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);

	return 0;
}
2205
2206/*
2207 * ethtool ring parameter (-g/G) support
2208 */
2209
2210/*
2211 * setringparamam - change the tx/rx ring lengths
2212 */
2213#define MIN_RING_SIZE 3
2214#define MAX_RING_SIZE 1000
2215static int qfec_ethtool_setringparam(struct net_device *dev,
2216 struct ethtool_ringparam *ring)
2217{
2218 struct qfec_priv *priv = netdev_priv(dev);
2219 u32 timeout = 20;
2220
2221 /* notify stack the link is down */
2222 netif_carrier_off(dev);
2223
2224 /* allow tx to complete & free skbufs on the tx ring */
2225 do {
2226 usleep_range(10000, 100000);
2227 qfec_tx_replenish(dev);
2228
2229 if (timeout-- == 0) {
2230 QFEC_LOG_ERR("%s: timeout\n", __func__);
2231 return -ETIME;
2232 }
2233 } while (!qfec_ring_empty(&priv->ring_tbd));
2234
2235
2236 qfec_stop(dev);
2237
2238 /* set tx ring size */
2239 if (ring->tx_pending < MIN_RING_SIZE)
2240 ring->tx_pending = MIN_RING_SIZE;
2241 else if (ring->tx_pending > MAX_RING_SIZE)
2242 ring->tx_pending = MAX_RING_SIZE;
2243 priv->n_tbd = ring->tx_pending;
2244
2245 /* set rx ring size */
2246 if (ring->rx_pending < MIN_RING_SIZE)
2247 ring->rx_pending = MIN_RING_SIZE;
2248 else if (ring->rx_pending > MAX_RING_SIZE)
2249 ring->rx_pending = MAX_RING_SIZE;
2250 priv->n_rbd = ring->rx_pending;
2251
2252
2253 qfec_open(dev);
2254
2255 return 0;
2256}
2257
2258/*
2259 * getringparamam - returns local values
2260 */
2261static void qfec_ethtool_getringparam(struct net_device *dev,
2262 struct ethtool_ringparam *ring)
2263{
2264 struct qfec_priv *priv = netdev_priv(dev);
2265
2266 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2267
2268 ring->rx_max_pending = MAX_RING_SIZE;
2269 ring->rx_mini_max_pending = 0;
2270 ring->rx_jumbo_max_pending = 0;
2271 ring->tx_max_pending = MAX_RING_SIZE;
2272
2273 ring->rx_pending = priv->n_rbd;
2274 ring->rx_mini_pending = 0;
2275 ring->rx_jumbo_pending = 0;
2276 ring->tx_pending = priv->n_tbd;
2277}
2278
2279/*
2280 * speed, duplex, auto-neg settings
2281 */
2282static int
2283qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2284{
2285 struct qfec_priv *priv = netdev_priv(dev);
2286
2287 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2288
2289 cmd->maxrxpkt = priv->n_rbd;
2290 cmd->maxtxpkt = priv->n_tbd;
2291
2292 return mii_ethtool_gset(&priv->mii, cmd);
2293}
2294
2295static int
2296qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
2297{
2298 struct qfec_priv *priv = netdev_priv(dev);
2299
2300 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2301
2302 return mii_ethtool_sset(&priv->mii, cmd);
2303}
2304
2305/*
2306 * msg/debug level
2307 */
2308static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
2309{
2310 return qfec_debug;
2311}
2312
/* ethtool msglvl set: XORs 'level' into the mask, so each set bit
 * toggles that log category rather than replacing the whole mask
 */
static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
	qfec_debug ^= level; /* toggle on/off */
}
2317
2318/*
2319 * register dump
2320 */
2321#define DMA_DMP_OFFSET 0x0000
2322#define DMA_REG_OFFSET 0x1000
2323#define DMA_REG_LEN 23
2324
2325#define MAC_DMP_OFFSET 0x0080
2326#define MAC_REG_OFFSET 0x0000
2327#define MAC_REG_LEN 55
2328
2329#define TS_DMP_OFFSET 0x0180
2330#define TS_REG_OFFSET 0x0700
2331#define TS_REG_LEN 15
2332
2333#define MDIO_DMP_OFFSET 0x0200
2334#define MDIO_REG_LEN 16
2335
2336#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
2337
2338static int qfec_ethtool_getregs_len(struct net_device *dev)
2339{
2340 return REG_SIZE;
2341}
2342
2343static void
2344qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
2345 void *buf)
2346{
2347 struct qfec_priv *priv = netdev_priv(dev);
2348 u32 *data = buf;
2349 u16 *data16;
2350 unsigned int i;
2351 unsigned int j;
2352 unsigned int n;
2353
2354 memset(buf, 0, REG_SIZE);
2355
2356 j = DMA_DMP_OFFSET / sizeof(u32);
2357 for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
2358 data[j++] = htonl(qfec_reg_read(priv, i));
2359
2360 j = MAC_DMP_OFFSET / sizeof(u32);
2361 for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
2362 data[j++] = htonl(qfec_reg_read(priv, i));
2363
2364 j = TS_DMP_OFFSET / sizeof(u32);
2365 for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
2366 data[j++] = htonl(qfec_reg_read(priv, i));
2367
2368 data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
2369 for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
2370 data16[n++] = htons(qfec_mdio_read(dev, 0, i));
2371
2372 regs->len = REG_SIZE;
2373
2374 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
2375}
2376
2377/*
2378 * statistics
2379 * return counts of various ethernet activity.
2380 * many of these are same as in struct net_device_stats
2381 *
2382 * missed-frames indicates the number of attempts made by the ethernet
2383 * controller to write to a buffer-descriptor when the BD ownership
2384 * bit was not set. The rxfifooverflow counter (0x1D4) is not
2385 * available. The Missed Frame and Buffer Overflow Counter register
2386 * (0x1020) is used, but has only 16-bits and is reset when read.
2387 * It is read and updates the value in priv->stats.rx_missed_errors
2388 * in qfec_rx_int().
2389 */
/* labels printed by qfec_stats_show(); must stay index-aligned with
 * qfec_stats_regs[] below
 */
static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
	"TX good/bad Bytes         ",
	"TX Bytes                  ",
	"TX good/bad Frames        ",
	"TX Bcast Frames           ",
	"TX Mcast Frames           ",
	"TX Unicast Frames         ",
	"TX Pause Frames           ",
	"TX Vlan Frames            ",
	"TX Frames 64              ",
	"TX Frames 65-127          ",
	"TX Frames 128-255         ",
	"TX Frames 256-511         ",
	"TX Frames 512-1023        ",
	"TX Frames 1024+           ",
	"TX Pause Frames           ",
	"TX Collisions             ",
	"TX Late Collisions        ",
	"TX Excessive Collisions   ",

	"RX good/bad Bytes         ",
	"RX Bytes                  ",
	"RX good/bad Frames        ",
	"RX Bcast Frames           ",
	"RX Mcast Frames           ",
	"RX Unicast Frames         ",
	"RX Pause Frames           ",
	"RX Vlan Frames            ",
	"RX Frames 64              ",
	"RX Frames 65-127          ",
	"RX Frames 128-255         ",
	"RX Frames 256-511         ",
	"RX Frames 512-1023        ",
	"RX Frames 1024+           ",
	"RX Pause Frames           ",
	"RX Crc error Frames       ",
	"RX Length error Frames    ",
	"RX Alignment error Frames ",
	"RX Runt Frames            ",
	"RX Oversize Frames        ",
	"RX Missed Frames          ",

};
2433
/* hardware counter register indices (32-bit words from the controller
 * base) for each label above; the byte offset read by
 * qfec_stats_show()/qfec_ethtool_getstats() is index * sizeof(uint32_t).
 * NOTE(review): index meanings inferred only from the paired labels --
 * confirm against the MAC management counter register map.
 */
static u32 qfec_stats_regs[] = {

	/* TX counters */
	69, 89, 70, 71, 72, 90, 92, 93,
	73, 74, 75, 76, 77, 78, 92, 84,
	86, 87,

	/* RX counters */
	97, 98, 96, 99, 100, 113, 116, 118,
	107, 108, 109, 110, 111, 112, 116, 101,
	114, 102, 103, 106
};
2444
2445static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
2446 char *buf)
2447{
2448 struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
2449 int count = PAGE_SIZE;
2450 int l = 0;
2451 int n;
2452
2453 QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
2454
2455 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) {
2456 l += snprintf(&buf[l], count - l, " %12u %s\n",
2457 qfec_reg_read(priv,
2458 qfec_stats_regs[n] * sizeof(uint32_t)),
2459 qfec_stats_strings[n]);
2460 }
2461
2462 return l;
2463}
2464
2465static int qfec_get_sset_count(struct net_device *dev, int sset)
2466{
2467 switch (sset) {
2468 case ETH_SS_STATS:
2469 return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */
2470
2471 default:
2472 return -EOPNOTSUPP;
2473 }
2474}
2475
2476static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
2477 u8 *buf)
2478{
2479 QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__,
2480 sizeof(qfec_stats_strings));
2481
2482 memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
2483}
2484
2485static void qfec_ethtool_getstats(struct net_device *dev,
2486 struct ethtool_stats *stats, uint64_t *data)
2487{
2488 struct qfec_priv *priv = netdev_priv(dev);
2489 int j = 0;
2490 int n;
2491
2492 for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
2493 data[j++] = qfec_reg_read(priv,
2494 qfec_stats_regs[n] * sizeof(uint32_t));
2495
2496 data[j++] = priv->stats.rx_missed_errors;
2497
2498 stats->n_stats = j;
2499}
2500
2501static void qfec_ethtool_getdrvinfo(struct net_device *dev,
2502 struct ethtool_drvinfo *info)
2503{
2504 strlcpy(info->driver, QFEC_NAME, sizeof(info->driver));
2505 strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
2506 strlcpy(info->bus_info, dev_name(dev->dev.parent),
2507 sizeof(info->bus_info));
2508
2509 info->eedump_len = 0;
2510 info->regdump_len = qfec_ethtool_getregs_len(dev);
2511}
2512
/*
 * ethtool ops table
 *
 * handlers are implemented earlier in this file except get_link,
 * which uses the generic MII helper.
 */
static const struct ethtool_ops qfec_ethtool_ops = {
	.nway_reset = qfec_nway_reset,

	.get_settings = qfec_ethtool_getsettings,
	.set_settings = qfec_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = qfec_ethtool_getdrvinfo,
	.get_msglevel = qfec_ethtool_getmsglevel,
	.set_msglevel = qfec_ethtool_setmsglevel,
	.get_regs_len = qfec_ethtool_getregs_len,
	.get_regs = qfec_ethtool_getregs,

	.get_ringparam = qfec_ethtool_getringparam,
	.set_ringparam = qfec_ethtool_setringparam,

	.get_pauseparam = qfec_ethtool_getpauseparam,
	.set_pauseparam = qfec_ethtool_setpauseparam,

	.get_sset_count = qfec_get_sset_count,
	.get_strings = qfec_ethtool_getstrings,
	.get_ethtool_stats = qfec_ethtool_getstats,
};
2538
/*
 * create sysfs entries
 *
 * read-only (0444) diagnostic files, plus the write-only 'cmd'
 * control file.
 * NOTE(review): 'cmd' is 0222 (writable by everyone); consider
 * restricting to owner (0200) -- confirm the intended access policy.
 */
static DEVICE_ATTR(bd_tx, 0444, qfec_bd_tx_show, NULL);
static DEVICE_ATTR(bd_rx, 0444, qfec_bd_rx_show, NULL);
static DEVICE_ATTR(cfg, 0444, qfec_config_show, NULL);
static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
static DEVICE_ATTR(cmd, 0222, NULL, qfec_cmd);
static DEVICE_ATTR(cntrs, 0444, qfec_cntrs_show, NULL);
static DEVICE_ATTR(reg, 0444, qfec_reg_show, NULL);
static DEVICE_ATTR(mdio, 0444, qfec_mdio_show, NULL);
static DEVICE_ATTR(stats, 0444, qfec_stats_show, NULL);
static DEVICE_ATTR(tstamp, 0444, qfec_tstamp_show, NULL);
2552
2553static void qfec_sysfs_create(struct net_device *dev)
2554{
2555 if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
2556 device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
2557 device_create_file(&(dev->dev), &dev_attr_cfg) ||
2558 device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
2559 device_create_file(&(dev->dev), &dev_attr_cmd) ||
2560 device_create_file(&(dev->dev), &dev_attr_cntrs) ||
2561 device_create_file(&(dev->dev), &dev_attr_mdio) ||
2562 device_create_file(&(dev->dev), &dev_attr_reg) ||
2563 device_create_file(&(dev->dev), &dev_attr_stats) ||
2564 device_create_file(&(dev->dev), &dev_attr_tstamp))
2565 pr_err("qfec_sysfs_create failed to create sysfs files\n");
2566}
2567
2568/*
2569 * map a specified resource
2570 */
2571static int qfec_map_resource(struct platform_device *plat, int resource,
2572 struct resource **priv_res,
2573 void **addr)
2574{
2575 struct resource *res;
2576
2577 QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
2578
2579 /* allocate region to access controller registers */
2580 *priv_res = res = platform_get_resource(plat, resource, 0);
2581 if (!res) {
2582 QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
2583 return -ENODEV;
2584 }
2585
2586 res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
2587 if (!res) {
2588 QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
2589 __func__, res->start, res->end - res->start);
2590 return -EBUSY;
2591 }
2592
2593 *addr = ioremap(res->start, res->end - res->start);
2594 if (!*addr)
2595 return -ENOMEM;
2596
2597 QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
2598 __func__, (void *)res->start, *addr);
2599
2600 return 0;
2601};
2602
2603/*
2604 * free allocated io regions
2605 */
2606static void qfec_free_res(struct resource *res, void *base)
2607{
2608
2609 if (res) {
2610 if (base)
2611 iounmap((void __iomem *)base);
2612
2613 release_mem_region(res->start, res->end - res->start);
2614 }
2615};
2616
2617/*
2618 * probe function that obtain configuration info and allocate net_device
2619 */
2620static int __devinit qfec_probe(struct platform_device *plat)
2621{
2622 struct net_device *dev;
2623 struct qfec_priv *priv;
2624 int ret = 0;
2625
2626 /* allocate device */
2627 dev = alloc_etherdev(sizeof(struct qfec_priv));
2628 if (!dev) {
2629 QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
2630 ret = -ENOMEM;
2631 goto err;
2632 }
2633
2634 QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n", __func__, (int)dev);
2635
2636 qfec_dev = dev;
2637 SET_NETDEV_DEV(dev, &plat->dev);
2638
2639 dev->netdev_ops = &qfec_netdev_ops;
2640 dev->ethtool_ops = &qfec_ethtool_ops;
2641 dev->watchdog_timeo = 2 * HZ;
2642 dev->irq = platform_get_irq(plat, 0);
2643
2644 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2645
2646 /* initialize private data */
2647 priv = (struct qfec_priv *)netdev_priv(dev);
2648 memset((void *)priv, 0, sizeof(priv));
2649
2650 priv->net_dev = dev;
2651 platform_set_drvdata(plat, dev);
2652
2653 priv->n_tbd = TX_BD_NUM;
2654 priv->n_rbd = RX_BD_NUM;
2655
2656 /* initialize phy structure */
2657 priv->mii.phy_id_mask = 0x1F;
2658 priv->mii.reg_num_mask = 0x1F;
2659 priv->mii.dev = dev;
2660 priv->mii.mdio_read = qfec_mdio_read;
2661 priv->mii.mdio_write = qfec_mdio_write;
2662
2663 /* map register regions */
2664 ret = qfec_map_resource(
2665 plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
2666 if (ret) {
2667 QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
2668 goto err1;
2669 }
2670
2671 ret = qfec_map_resource(
2672 plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
2673 if (ret) {
2674 QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
2675 goto err2;
2676 }
2677
2678 ret = qfec_map_resource(
2679 plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
2680 if (ret) {
2681 QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
2682 goto err3;
2683 }
2684
2685 /* initialize MAC addr */
2686 ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
2687 MAC_ADDR_SIZE);
2688 if (ret)
2689 goto err4;
2690
2691 QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
2692 __func__,
2693 dev->dev_addr[0], dev->dev_addr[1],
2694 dev->dev_addr[2], dev->dev_addr[3],
2695 dev->dev_addr[4], dev->dev_addr[5]);
2696
2697 ret = register_netdev(dev);
2698 if (ret) {
2699 QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
2700 goto err4;
2701 }
2702
2703 spin_lock_init(&priv->mdio_lock);
2704 spin_lock_init(&priv->xmit_lock);
2705 qfec_sysfs_create(dev);
2706
2707 return 0;
2708
2709 /* error handling */
2710err4:
2711 qfec_free_res(priv->fuse_res, priv->fuse_base);
2712err3:
2713 qfec_free_res(priv->clk_res, priv->clk_base);
2714err2:
2715 qfec_free_res(priv->mac_res, priv->mac_base);
2716err1:
2717 free_netdev(dev);
2718err:
2719 QFEC_LOG_ERR("%s: err\n", __func__);
2720 return ret;
2721}
2722
2723/*
2724 * module remove
2725 */
2726static int __devexit qfec_remove(struct platform_device *plat)
2727{
2728 struct net_device *dev = platform_get_drvdata(plat);
2729 struct qfec_priv *priv = netdev_priv(dev);
2730
2731 QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
2732
2733 platform_set_drvdata(plat, NULL);
2734
2735 qfec_free_res(priv->fuse_res, priv->fuse_base);
2736 qfec_free_res(priv->clk_res, priv->clk_base);
2737 qfec_free_res(priv->mac_res, priv->mac_base);
2738
2739 unregister_netdev(dev);
2740 free_netdev(dev);
2741
2742 return 0;
2743}
2744
/*
 * module support
 * the FSM9xxx is not a mobile device and does not support power
 * management, so no suspend/resume callbacks are provided
 */

static struct platform_driver qfec_driver = {
	.probe = qfec_probe,
	.remove = __devexit_p(qfec_remove),
	.driver = {
		.name = QFEC_NAME,
		.owner = THIS_MODULE,
	},
};
2758
2759/*
2760 * module init
2761 */
2762static int __init qfec_init_module(void)
2763{
2764 int res;
2765
2766 QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
2767
2768 res = platform_driver_register(&qfec_driver);
2769
2770 QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
2771 __func__, res);
2772
2773 return res;
2774}
2775
/*
 * module exit: unregister the platform driver (undoes qfec_init_module)
 */
static void __exit qfec_exit_module(void)
{
	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	platform_driver_unregister(&qfec_driver);
}
2785
/* module metadata */
MODULE_DESCRIPTION("FSM Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
MODULE_VERSION("1.0");

/* module entry/exit points */
module_init(qfec_init_module);
module_exit(qfec_exit_module);