Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4
AU_LINUX_ANDROID_ICS.04.00.04.00.126 is from msm-3.0; the first parent is from google/android-3.4.
* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
PRNG: Device tree entry for qrng device.
vidc:1080p: Set video core timeout value for Thumbnail mode
msm: sps: improve the debugging support in SPS driver
board-8064 msm: Overlap secure and non secure video firmware heaps.
msm: clock: Add handoff ops for 7x30 and copper XO clocks
msm_fb: display: Wait for external vsync before DTV IOMMU unmap
msm: Fix circular dependency in debug UART settings
msm: gdsc: Add GDSC regulator driver for msm-copper
defconfig: Enable Mobicore Driver.
mobicore: Add mobicore driver.
mobicore: rename variable to lower case.
mobicore: rename folder.
mobicore: add makefiles
mobicore: initial import of kernel driver
ASoC: msm: Add SLIMBUS_2_RX CPU DAI
board-8064-gpio: Update FUNC for EPM SPI CS
msm_fb: display: Remove chicken bit config during video playback
mmc: msm_sdcc: enable the sanitize capability
msm-fb: display: lm2 writeback support on mpq platforms
msm_fb: display: Disable LVDS phy & pll during panel off
...
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c63a64c..c82f7d1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -98,6 +98,7 @@
emulated by the MIPS Simulator.
If you are not using a MIPSsim or are unsure, say N.
+source "drivers/net/ethernet/msm/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9676a51..50fdf5e 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,6 +41,7 @@
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
+obj-$(CONFIG_ARCH_MSM) += msm/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
new file mode 100644
index 0000000..095cb4d
--- /dev/null
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -0,0 +1,52 @@
+#
+# msm network device configuration
+#
+
+config MSM_RMNET
+ tristate "MSM RMNET Virtual Network Device"
+ depends on ARCH_MSM
+ default y
+ help
+ Virtual ethernet interface for MSM RMNET transport.
+
+config MSM_RMNET_SDIO
+ bool "RMNET SDIO Driver"
+ depends on MSM_SDIO_DMUX
+ default n
+ help
+ Implements RMNET over SDIO interface.
+
+config MSM_RMNET_BAM
+ bool "RMNET BAM Driver"
+ depends on MSM_BAM_DMUX
+ default n
+ help
+ Implements RMNET over BAM interface.
+ RMNET provides a virtual ethernet interface
+ for routing IP packets within the MSM using
+ BAM as a physical transport.
+
+config MSM_RMNET_SMUX
+ bool "RMNET SMUX Driver"
+ depends on N_SMUX
+ help
+ Implements RMNET over SMUX interface.
+ RMNET provides a virtual ethernet interface
+ for routing IP packets within the MSM using
+ HSUART as a physical transport.
+
+config MSM_RMNET_DEBUG
+ bool "MSM RMNET debug interface"
+ depends on MSM_RMNET
+ default n
+ help
+ Debug stats on wakeup counts.
+
+config QFEC
+ tristate "QFEC ethernet driver"
+ select MII
+ depends on ARM
+ help
+ This driver supports Ethernet in the FSM9xxx.
+ To compile this driver as a module, choose M here: the
+ module will be called qfec.
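
These drivers are normally enabled from the board defconfig rather than selected by hand. A hypothetical fragment (option names taken from the Kconfig above; the BAM transport is shown on the assumption that MSM_BAM_DMUX is enabled for the target):

    CONFIG_MSM_RMNET=y
    CONFIG_MSM_RMNET_BAM=y
    CONFIG_MSM_RMNET_DEBUG=y
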
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
new file mode 100644
index 0000000..7d9d4c6
--- /dev/null
+++ b/drivers/net/ethernet/msm/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the msm networking support.
+#
+
+obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
+obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
+obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
+obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o
+obj-$(CONFIG_QFEC) += qfec.o
diff --git a/drivers/net/ethernet/msm/msm_rmnet.c b/drivers/net/ethernet/msm/msm_rmnet.c
new file mode 100644
index 0000000..61df241
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet.c
@@ -0,0 +1,846 @@
+/* linux/drivers/net/msm_rmnet.c
+ *
+ * Virtual Ethernet Interface for MSM7K Networking
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/msm_smd.h>
+#include <mach/peripheral-loader.h>
+
+/* Debug message support */
+static int msm_rmnet_debug_mask;
+module_param_named(debug_enable, msm_rmnet_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do { \
+ if (msm_rmnet_debug_mask & m) \
+ pr_info(x); \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+static const char *ch_name[RMNET_DEVICE_COUNT] = {
+ "DATA5",
+ "DATA6",
+ "DATA7",
+ "DATA8",
+ "DATA9",
+ "DATA12",
+ "DATA13",
+ "DATA14",
+};
+
+/* XXX should come from smd headers */
+#define SMD_PORT_ETHER0 11
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define HEADROOM_FOR_QOS 8
+
+static struct completion *port_complete[RMNET_DEVICE_COUNT];
+
+struct rmnet_private {
+ smd_channel_t *ch;
+ struct net_device_stats stats;
+ const char *chname;
+ struct wake_lock wake_lock;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ ktime_t last_packet;
+ unsigned long wakeups_xmit;
+ unsigned long wakeups_rcv;
+ unsigned long timeout_us;
+#endif
+ struct sk_buff *skb;
+ spinlock_t lock;
+ struct tasklet_struct tsklt;
+ u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+ struct platform_driver pdrv;
+ struct completion complete;
+ void *pil;
+ struct mutex pil_lock;
+};
+
+static uint msm_rmnet_modem_wait;
+module_param_named(modem_wait, msm_rmnet_modem_wait,
+ uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static int count_this_packet(void *_hdr, int len)
+{
+ struct ethhdr *hdr = _hdr;
+
+ if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+ return 0;
+
+ return 1;
+}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t n)
+{
+ timeout_suspend_us = simple_strtoul(buf, NULL, 10);
+ return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_suspend_us;
+ }
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_us;
+ }
+}
+
+static struct early_suspend rmnet_power_suspend = {
+ .suspend = rmnet_early_suspend,
+ .resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+ register_early_suspend(&rmnet_power_suspend);
+ return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+ int ret = 0;
+ ktime_t now;
+ if (p->timeout_us == 0) /* Check if disabled */
+ return 0;
+
+ /* Use real (wall) time. */
+ now = ktime_get_real();
+
+ if (ktime_us_delta(now, p->last_packet) > p->timeout_us) {
+ ret = 1;
+ }
+ p->last_packet = now;
+ return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+ timeout_us = simple_strtoul(buf, NULL, 10);
+#endif
+ return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ __be16 protocol = 0;
+
+ skb->dev = dev;
+
+ /* Determine L3 protocol */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+ dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+ }
+ return protocol;
+}
+
+static void smd_net_data_handler(unsigned long arg);
+static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);
+
+/* Called in soft-irq context */
+static void smd_net_data_handler(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *) arg;
+ struct rmnet_private *p = netdev_priv(dev);
+ struct sk_buff *skb;
+ void *ptr = 0;
+ int sz;
+ u32 opmode = p->operation_mode;
+ unsigned long flags;
+
+ for (;;) {
+ sz = smd_cur_packet_size(p->ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(p->ch) < sz)
+			break;
+
+ skb = dev_alloc_skb(sz + NET_IP_ALIGN);
+ if (skb == NULL) {
+ pr_err("[%s] rmnet_recv() cannot allocate skb\n",
+ dev->name);
+ /* out of memory, reschedule a later attempt */
+ smd_net_data_tasklet.data = (unsigned long)dev;
+ tasklet_schedule(&smd_net_data_tasklet);
+ break;
+ } else {
+ skb->dev = dev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ ptr = skb_put(skb, sz);
+ wake_lock_timeout(&p->wake_lock, HZ / 2);
+ if (smd_read(p->ch, ptr, sz) != sz) {
+ pr_err("[%s] rmnet_recv() smd lied about avail?!",
+ dev->name);
+ ptr = 0;
+ dev_kfree_skb_irq(skb);
+ } else {
+ /* Handle Rx frame format */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_IP(opmode)) {
+ /* Driver in IP mode */
+ skb->protocol =
+ rmnet_ip_type_trans(skb, dev);
+ } else {
+ /* Driver in Ethernet mode */
+ skb->protocol =
+ eth_type_trans(skb, dev);
+ }
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(ptr, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_rcv +=
+ rmnet_cause_wakeup(p);
+#endif
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += skb->len;
+ }
+ DBG1("[%s] Rx packet #%lu len=%d\n",
+ dev->name, p->stats.rx_packets,
+ skb->len);
+
+ /* Deliver to network stack */
+ netif_rx(skb);
+ }
+ continue;
+ }
+ }
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ smd_channel_t *ch = p->ch;
+ int smd_ret;
+ struct QMI_QOS_HDR_S *qmih;
+ u32 opmode;
+ unsigned long flags;
+
+ /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_QOS(opmode)) {
+ qmih = (struct QMI_QOS_HDR_S *)
+ skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+ qmih->version = 1;
+ qmih->flags = 0;
+ qmih->flow_id = skb->mark;
+ }
+
+ dev->trans_start = jiffies;
+ smd_ret = smd_write(ch, skb->data, skb->len);
+ if (smd_ret != skb->len) {
+ pr_err("[%s] %s: smd_write returned error %d",
+ dev->name, __func__, smd_ret);
+ p->stats.tx_errors++;
+ goto xmit_out;
+ }
+
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+ p->stats.tx_packets++;
+ p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+ }
+ DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+xmit_out:
+	/* data transmitted, safe to release skb */
+ dev_kfree_skb_irq(skb);
+ return 0;
+}
+
+static void _rmnet_resume_flow(unsigned long param)
+{
+ struct net_device *dev = (struct net_device *)param;
+ struct rmnet_private *p = netdev_priv(dev);
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+
+	/*
+	 * xmit and enable the flow only once even if multiple
+	 * tasklets were scheduled by smd_net_notify
+	 */
+ spin_lock_irqsave(&p->lock, flags);
+ if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
+ skb = p->skb;
+ p->skb = NULL;
+ spin_unlock_irqrestore(&p->lock, flags);
+ _rmnet_xmit(skb, dev);
+ netif_wake_queue(dev);
+ } else
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void msm_rmnet_unload_modem(void *pil)
+{
+ if (pil)
+ pil_put(pil);
+}
+
+static void *msm_rmnet_load_modem(struct net_device *dev)
+{
+ void *pil;
+ int rc;
+ struct rmnet_private *p = netdev_priv(dev);
+
+ pil = pil_get("modem");
+ if (IS_ERR(pil))
+ pr_err("[%s] %s: modem load failed\n",
+ dev->name, __func__);
+ else if (msm_rmnet_modem_wait) {
+ rc = wait_for_completion_interruptible_timeout(
+ &p->complete,
+ msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
+ if (!rc)
+ rc = -ETIMEDOUT;
+ if (rc < 0) {
+ pr_err("[%s] %s: wait for rmnet port failed %d\n",
+ dev->name, __func__, rc);
+ msm_rmnet_unload_modem(pil);
+ pil = ERR_PTR(rc);
+ }
+ }
+
+ return pil;
+}
+
+static void smd_net_notify(void *_dev, unsigned event)
+{
+ struct rmnet_private *p = netdev_priv((struct net_device *)_dev);
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ spin_lock(&p->lock);
+ if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
+ smd_disable_read_intr(p->ch);
+ tasklet_hi_schedule(&p->tsklt);
+ }
+
+ spin_unlock(&p->lock);
+
+ if (smd_read_avail(p->ch) &&
+ (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
+ smd_net_data_tasklet.data = (unsigned long) _dev;
+ tasklet_schedule(&smd_net_data_tasklet);
+ }
+ break;
+
+ case SMD_EVENT_OPEN:
+ DBG0("%s: opening SMD port\n", __func__);
+ netif_carrier_on(_dev);
+ if (netif_queue_stopped(_dev)) {
+ DBG0("%s: re-starting if queue\n", __func__);
+ netif_wake_queue(_dev);
+ }
+ break;
+
+ case SMD_EVENT_CLOSE:
+ DBG0("%s: closing SMD port\n", __func__);
+ netif_carrier_off(_dev);
+ break;
+ }
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+ int r;
+ void *pil;
+ struct rmnet_private *p = netdev_priv(dev);
+
+ mutex_lock(&p->pil_lock);
+ if (!p->pil) {
+ pil = msm_rmnet_load_modem(dev);
+ if (IS_ERR(pil)) {
+ mutex_unlock(&p->pil_lock);
+ return PTR_ERR(pil);
+ }
+ p->pil = pil;
+ }
+ mutex_unlock(&p->pil_lock);
+
+ if (!p->ch) {
+ r = smd_open(p->chname, &p->ch, dev, smd_net_notify);
+
+ if (r < 0)
+ return -ENODEV;
+ }
+
+ smd_disable_read_intr(p->ch);
+ return 0;
+}
+
+static int __rmnet_close(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ if (p->ch) {
+ rc = smd_close(p->ch);
+ spin_lock_irqsave(&p->lock, flags);
+ p->ch = 0;
+ spin_unlock_irqrestore(&p->lock, flags);
+ return rc;
+ } else
+ return -EBADF;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ DBG0("[%s] rmnet_open()\n", dev->name);
+
+ rc = __rmnet_open(dev);
+ if (rc == 0)
+ netif_start_queue(dev);
+
+ return rc;
+}
+
+static int rmnet_stop(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+
+ DBG0("[%s] rmnet_stop()\n", dev->name);
+
+ netif_stop_queue(dev);
+ tasklet_kill(&p->tsklt);
+
+	/*
+	 * TODO: unload the modem safely; currently this causes
+	 * unnecessary unloads
+	 */
+ /*
+ mutex_lock(&p->pil_lock);
+ msm_rmnet_unload_modem(p->pil);
+ p->pil = NULL;
+ mutex_unlock(&p->pil_lock);
+ */
+
+ return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
+ return -EINVAL;
+
+ DBG0("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ smd_channel_t *ch = p->ch;
+ unsigned long flags;
+
+ if (netif_queue_stopped(dev)) {
+		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
+ dev->name);
+ return 0;
+ }
+
+ spin_lock_irqsave(&p->lock, flags);
+ smd_enable_read_intr(ch);
+ if (smd_write_avail(ch) < skb->len) {
+ netif_stop_queue(dev);
+ p->skb = skb;
+ spin_unlock_irqrestore(&p->lock, flags);
+ return 0;
+ }
+ smd_disable_read_intr(ch);
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ _rmnet_xmit(skb, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+ pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+
+static const struct net_device_ops rmnet_ops_ether = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_set_rx_mode = rmnet_set_multicast_list,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_set_rx_mode = rmnet_set_multicast_list,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = NULL,
+	.ndo_validate_addr = NULL,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 old_opmode = p->operation_mode;
+ unsigned long flags;
+ int prev_mtu = dev->mtu;
+ int rc = 0;
+
+ /* Process IOCTL command */
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */
+ /* Perform Ethernet config only if in IP mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_IP) {
+ ether_setup(dev);
+ random_ether_addr(dev->dev_addr);
+ dev->mtu = prev_mtu;
+
+ dev->netdev_ops = &rmnet_ops_ether;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_IP;
+ p->operation_mode |= RMNET_MODE_LLP_ETH;
+ spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
+				dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
+ /* Perform IP config only if in Ethernet mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+ /* Undo config done in ether_setup() */
+			dev->header_ops = NULL;  /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = prev_mtu;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST|
+ IFF_MULTICAST);
+
+ dev->netdev_ops = &rmnet_ops_ip;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+ p->operation_mode |= RMNET_MODE_LLP_IP;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
+ dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode &
+ (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+ break;
+
+ case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode |= RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode & RMNET_MODE_QOS);
+ break;
+
+ case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
+ ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+ break;
+
+ case RMNET_IOCTL_OPEN: /* Open transport port */
+ rc = __rmnet_open(dev);
+ DBG0("[%s] rmnet_ioctl(): open transport port\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_CLOSE: /* Close transport port */
+ rc = __rmnet_close(dev);
+ DBG0("[%s] rmnet_ioctl(): close transport port\n",
+ dev->name);
+ break;
+
+ default:
+		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
+ dev->name, cmd);
+ return -EINVAL;
+ }
+
+ DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+ dev->name, __func__, cmd, old_opmode, p->operation_mode);
+ return rc;
+}
+
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+ /* Using Ethernet mode by default */
+ dev->netdev_ops = &rmnet_ops_ether;
+ ether_setup(dev);
+
+ /* set this after calling ether_setup */
+ dev->mtu = RMNET_DATA_LEN;
+ dev->needed_headroom = HEADROOM_FOR_QOS;
+
+ random_ether_addr(dev->dev_addr);
+
+ dev->watchdog_timeo = 1000; /* 10 seconds? */
+}
+
+static int msm_rmnet_smd_probe(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < RMNET_DEVICE_COUNT; i++)
+ if (!strcmp(pdev->name, ch_name[i])) {
+ complete_all(port_complete[i]);
+ break;
+ }
+
+ return 0;
+}
+
+static int __init rmnet_init(void)
+{
+ int ret;
+ struct device *d;
+ struct net_device *dev;
+ struct rmnet_private *p;
+ unsigned n;
+
+ pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ timeout_suspend_us = 0;
+#endif
+#endif
+
+ for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+ dev = alloc_netdev(sizeof(struct rmnet_private),
+ "rmnet%d", rmnet_setup);
+
+ if (!dev)
+ return -ENOMEM;
+
+ d = &(dev->dev);
+ p = netdev_priv(dev);
+ p->chname = ch_name[n];
+ /* Initial config uses Ethernet */
+ p->operation_mode = RMNET_MODE_LLP_ETH;
+ p->skb = NULL;
+ spin_lock_init(&p->lock);
+ tasklet_init(&p->tsklt, _rmnet_resume_flow,
+ (unsigned long)dev);
+ wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->timeout_us = timeout_us;
+ p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+ init_completion(&p->complete);
+ port_complete[n] = &p->complete;
+ mutex_init(&p->pil_lock);
+ p->pdrv.probe = msm_rmnet_smd_probe;
+ p->pdrv.driver.name = ch_name[n];
+ p->pdrv.driver.owner = THIS_MODULE;
+ ret = platform_driver_register(&p->pdrv);
+ if (ret) {
+ free_netdev(dev);
+ return ret;
+ }
+
+ ret = register_netdev(dev);
+ if (ret) {
+ platform_driver_unregister(&p->pdrv);
+ free_netdev(dev);
+ return ret;
+ }
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ if (device_create_file(d, &dev_attr_timeout))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_xmit))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_rcv))
+ continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (device_create_file(d, &dev_attr_timeout_suspend))
+ continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+ if (n == 0)
+ rmnet0 = d;
+#endif
+#endif
+ }
+ return 0;
+}
+
+module_init(rmnet_init);
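
The RMNET_IOCTL_* commands handled by rmnet_ioctl() above are device-private ioctls reached through ndo_do_ioctl. A minimal userspace sketch, assuming the RMNET_IOCTL_* values from the kernel's <linux/msm_rmnet.h> are visible to userspace and sit in the SIOCDEVPRIVATE range (both assumptions, not spelled out by this patch):

/* sketch: switch rmnet0 to raw-IP framing and enable the QMI QoS header */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/msm_rmnet.h>	/* hypothetical userspace copy of the header */

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket fd works for device ioctls */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "rmnet0", IFNAMSIZ - 1);

	if (ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr) < 0)	/* raw-IP mode */
		perror("RMNET_IOCTL_SET_LLP_IP");
	if (ioctl(fd, RMNET_IOCTL_SET_QOS_ENABLE, &ifr) < 0)	/* QoS header on */
		perror("RMNET_IOCTL_SET_QOS_ENABLE");

	close(fd);
	return 0;
}

The GET variants (RMNET_IOCTL_GET_LLP, RMNET_IOCTL_GET_QOS, RMNET_IOCTL_GET_OPMODE) return their result in ifr.ifr_ifru.ifru_data, mirroring the driver side above.
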
diff --git a/drivers/net/ethernet/msm/msm_rmnet_bam.c b/drivers/net/ethernet/msm/msm_rmnet_bam.c
new file mode 100644
index 0000000..fbe8d3c
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_bam.c
@@ -0,0 +1,826 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET BAM Module.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/bam_dmux.h>
+
+/* Debug message support */
+static int msm_rmnet_bam_debug_mask;
+module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do { \
+ if (msm_rmnet_bam_debug_mask & m) \
+ pr_info(x); \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID -1
+
+#define DEVICE_INACTIVE 0
+#define DEVICE_ACTIVE 1
+
+#define HEADROOM_FOR_BAM 8 /* for mux header */
+#define HEADROOM_FOR_QOS 8
+#define TAILROOM 8 /* for padding by mux layer */
+
+struct rmnet_private {
+ struct net_device_stats stats;
+ uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ ktime_t last_packet;
+ unsigned long wakeups_xmit;
+ unsigned long wakeups_rcv;
+ unsigned long timeout_us;
+#endif
+ struct sk_buff *waiting_for_ul_skb;
+ spinlock_t lock;
+ spinlock_t tx_queue_lock;
+ struct tasklet_struct tsklt;
+ u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+ uint8_t device_up;
+ uint8_t in_reset;
+};
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
+ return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+ timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_suspend_us;
+ }
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_us;
+ }
+}
+
+static struct early_suspend rmnet_power_suspend = {
+ .suspend = rmnet_early_suspend,
+ .resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+ register_early_suspend(&rmnet_power_suspend);
+ return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+ int ret = 0;
+ ktime_t now;
+ if (p->timeout_us == 0) /* Check if disabled */
+ return 0;
+
+ /* Use real (wall) time. */
+ now = ktime_get_real();
+
+ if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+ ret = 1;
+
+ p->last_packet = now;
+ return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = simple_strtoul(buf, NULL, 10);
+#endif
+ return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ __be16 protocol = 0;
+
+ skb->dev = dev;
+
+ /* Determine L3 protocol */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+ dev->name, skb->data[0] & 0xf0);
+ /* skb will be dropped in upper layer for unknown protocol */
+ }
+ return protocol;
+}
+
+static int count_this_packet(void *_hdr, int len)
+{
+ struct ethhdr *hdr = _hdr;
+
+ if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+ return 0;
+
+ return 1;
+}
+
+/* Rx Callback, Called in Work Queue context */
+static void bam_recv_notify(void *dev, struct sk_buff *skb)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ unsigned long flags;
+ u32 opmode;
+
+ if (skb) {
+ skb->dev = dev;
+ /* Handle Rx frame format */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_IP(opmode)) {
+ /* Driver in IP mode */
+ skb->protocol = rmnet_ip_type_trans(skb, dev);
+ } else {
+ /* Driver in Ethernet mode */
+ skb->protocol = eth_type_trans(skb, dev);
+ }
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += skb->len;
+ }
+ DBG1("[%s] Rx packet #%lu len=%d\n",
+ ((struct net_device *)dev)->name,
+ p->stats.rx_packets, skb->len);
+
+ /* Deliver to network stack */
+ netif_rx(skb);
+ } else
+ pr_err("[%s] %s: No skb received",
+ ((struct net_device *)dev)->name, __func__);
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int bam_ret;
+ struct QMI_QOS_HDR_S *qmih;
+ u32 opmode;
+ unsigned long flags;
+
+ /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_QOS(opmode)) {
+ qmih = (struct QMI_QOS_HDR_S *)
+ skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+ qmih->version = 1;
+ qmih->flags = 0;
+ qmih->flow_id = skb->mark;
+ }
+
+ dev->trans_start = jiffies;
+ /* if write() succeeds, skb access is unsafe in this process */
+ bam_ret = msm_bam_dmux_write(p->ch_id, skb);
+
+ if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
+ pr_err("[%s] %s: write returned error %d",
+ dev->name, __func__, bam_ret);
+ return -EPERM;
+ }
+
+ return bam_ret;
+}
+
+static void bam_write_done(void *dev, struct sk_buff *skb)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 opmode = p->operation_mode;
+ unsigned long flags;
+
+ DBG1("%s: write complete\n", __func__);
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+ p->stats.tx_packets++;
+ p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+ }
+ DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ ((struct net_device *)(dev))->name, p->stats.tx_packets,
+ skb->len, skb->mark);
+ dev_kfree_skb_any(skb);
+
+ spin_lock_irqsave(&p->tx_queue_lock, flags);
+ if (netif_queue_stopped(dev) &&
+ msm_bam_dmux_is_ch_low(p->ch_id)) {
+		DBG0("%s: Low WM hit, waking queue, skb=%p\n",
+			__func__, skb);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&p->tx_queue_lock, flags);
+}
+
+static void bam_notify(void *dev, int event, unsigned long data)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ unsigned long flags;
+
+ switch (event) {
+ case BAM_DMUX_RECEIVE:
+ bam_recv_notify(dev, (struct sk_buff *)(data));
+ break;
+ case BAM_DMUX_WRITE_DONE:
+ bam_write_done(dev, (struct sk_buff *)(data));
+ break;
+ case BAM_DMUX_UL_CONNECTED:
+ spin_lock_irqsave(&p->lock, flags);
+ if (p->waiting_for_ul_skb != NULL) {
+ struct sk_buff *skb;
+ int ret;
+
+ skb = p->waiting_for_ul_skb;
+ p->waiting_for_ul_skb = NULL;
+ spin_unlock_irqrestore(&p->lock, flags);
+ ret = _rmnet_xmit(skb, dev);
+ if (ret) {
+ pr_err("%s: error %d dropping delayed TX SKB %p\n",
+ __func__, ret, skb);
+ dev_kfree_skb_any(skb);
+ }
+ netif_wake_queue(dev);
+ } else {
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+ break;
+ case BAM_DMUX_UL_DISCONNECTED:
+ break;
+ }
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+ int r;
+ struct rmnet_private *p = netdev_priv(dev);
+
+ DBG0("[%s] __rmnet_open()\n", dev->name);
+
+ if (!p->device_up) {
+ r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);
+
+ if (r < 0) {
+ DBG0("%s: ch=%d failed with rc %d\n",
+ __func__, p->ch_id, r);
+ return -ENODEV;
+ }
+ }
+
+ p->device_up = DEVICE_ACTIVE;
+ return 0;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ DBG0("[%s] rmnet_open()\n", dev->name);
+
+ rc = __rmnet_open(dev);
+
+ if (rc == 0)
+ netif_start_queue(dev);
+
+ return rc;
+}
+
+
+static int __rmnet_close(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int rc = 0;
+
+ if (p->device_up) {
+		/*
+		 * do not close the rmnet port once it is up; this causes
+		 * the remote side to hang if it is opened again
+		 */
+ p->device_up = DEVICE_INACTIVE;
+ return rc;
+ } else
+ return -EBADF;
+}
+
+
+static int rmnet_stop(struct net_device *dev)
+{
+ DBG0("[%s] rmnet_stop()\n", dev->name);
+
+ __rmnet_close(dev);
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
+ return -EINVAL;
+
+ DBG0("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ unsigned long flags;
+ int awake;
+ int ret = 0;
+
+ if (netif_queue_stopped(dev)) {
+		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
+			dev->name);
+ return 0;
+ }
+
+ spin_lock_irqsave(&p->lock, flags);
+ awake = msm_bam_dmux_ul_power_vote();
+ if (!awake) {
+ /* send SKB once wakeup is complete */
+ netif_stop_queue(dev);
+ p->waiting_for_ul_skb = skb;
+ spin_unlock_irqrestore(&p->lock, flags);
+ ret = 0;
+ goto exit;
+ }
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ ret = _rmnet_xmit(skb, dev);
+ if (ret == -EPERM) {
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+
+	/*
+	 * We detected SSR a bit early: shut some things down now, and
+	 * leave the rest to the main SSR handling code when it runs later.
+	 */
+ if (ret == -EFAULT) {
+ netif_carrier_off(dev);
+ dev_kfree_skb_any(skb);
+ ret = 0;
+ goto exit;
+ }
+
+ if (ret == -EAGAIN) {
+ /*
+		 * This should not happen: EAGAIN means we attempted to
+		 * overflow the high watermark. Clearly the queue is not
+		 * stopped like it should be, so stop it and return BUSY
+		 * to the TCP/IP framework. It will retry this packet
+		 * when the queue is restarted, which happens in the
+		 * write_done callback when the low watermark is hit.
+ */
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+
+ spin_lock_irqsave(&p->tx_queue_lock, flags);
+ if (msm_bam_dmux_is_ch_full(p->ch_id)) {
+ netif_stop_queue(dev);
+		DBG0("%s: High WM hit, stopping queue, skb=%p\n", __func__, skb);
+ }
+ spin_unlock_irqrestore(&p->tx_queue_lock, flags);
+
+exit:
+ msm_bam_dmux_ul_power_unvote();
+ return ret;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ return &p->stats;
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+ pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = NULL,
+	.ndo_validate_addr = NULL,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 old_opmode = p->operation_mode;
+ unsigned long flags;
+ int prev_mtu = dev->mtu;
+ int rc = 0;
+
+ /* Process IOCTL command */
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */
+ /* Perform Ethernet config only if in IP mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_IP) {
+ ether_setup(dev);
+ random_ether_addr(dev->dev_addr);
+ dev->mtu = prev_mtu;
+
+ dev->netdev_ops = &rmnet_ops_ether;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_IP;
+ p->operation_mode |= RMNET_MODE_LLP_ETH;
+ spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
+				dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
+ /* Perform IP config only if in Ethernet mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+ /* Undo config done in ether_setup() */
+			dev->header_ops = NULL; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = prev_mtu;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST|
+ IFF_MULTICAST);
+
+ dev->needed_headroom = HEADROOM_FOR_BAM +
+ HEADROOM_FOR_QOS;
+ dev->needed_tailroom = TAILROOM;
+ dev->netdev_ops = &rmnet_ops_ip;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+ p->operation_mode |= RMNET_MODE_LLP_IP;
+ spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
+				dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode &
+ (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+ break;
+
+ case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode |= RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode & RMNET_MODE_QOS);
+ break;
+
+ case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
+ ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+ break;
+
+ case RMNET_IOCTL_OPEN: /* Open transport port */
+ rc = __rmnet_open(dev);
+ DBG0("[%s] rmnet_ioctl(): open transport port\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_CLOSE: /* Close transport port */
+ rc = __rmnet_close(dev);
+ DBG0("[%s] rmnet_ioctl(): close transport port\n",
+ dev->name);
+ break;
+
+ default:
+		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
+ dev->name, cmd);
+ return -EINVAL;
+ }
+
+ DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+ dev->name, __func__, cmd, old_opmode, p->operation_mode);
+ return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+ /* Using Ethernet mode by default */
+ dev->netdev_ops = &rmnet_ops_ether;
+ ether_setup(dev);
+
+ /* set this after calling ether_setup */
+ dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
+ dev->needed_tailroom = TAILROOM;
+ random_ether_addr(dev->dev_addr);
+
+ dev->watchdog_timeo = 1000; /* 10 seconds? */
+}
+
+static struct net_device *netdevs[RMNET_DEVICE_COUNT];
+static struct platform_driver bam_rmnet_drivers[RMNET_DEVICE_COUNT];
+
+static int bam_rmnet_probe(struct platform_device *pdev)
+{
+ int i;
+ char name[BAM_DMUX_CH_NAME_MAX_LEN];
+ struct rmnet_private *p;
+
+ for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
+ scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
+ if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
+ break;
+	}
+	if (i == RMNET_DEVICE_COUNT)
+		return -ENODEV;	/* no matching bam_dmux channel */
+
+	p = netdev_priv(netdevs[i]);
+ if (p->in_reset) {
+ p->in_reset = 0;
+ msm_bam_dmux_open(p->ch_id, netdevs[i], bam_notify);
+ netif_carrier_on(netdevs[i]);
+ netif_start_queue(netdevs[i]);
+ }
+
+ return 0;
+}
+
+static int bam_rmnet_remove(struct platform_device *pdev)
+{
+ int i;
+ char name[BAM_DMUX_CH_NAME_MAX_LEN];
+ struct rmnet_private *p;
+
+ for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
+ scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
+ if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
+ break;
+	}
+	if (i == RMNET_DEVICE_COUNT)
+		return 0;	/* no matching bam_dmux channel; nothing to clean up */
+
+	p = netdev_priv(netdevs[i]);
+ p->in_reset = 1;
+ if (p->waiting_for_ul_skb != NULL) {
+ dev_kfree_skb_any(p->waiting_for_ul_skb);
+ p->waiting_for_ul_skb = NULL;
+ }
+ msm_bam_dmux_close(p->ch_id);
+ netif_carrier_off(netdevs[i]);
+ netif_stop_queue(netdevs[i]);
+ return 0;
+}
+
+static int __init rmnet_init(void)
+{
+ int ret;
+ struct device *d;
+ struct net_device *dev;
+ struct rmnet_private *p;
+ unsigned n;
+ char *tempname;
+
+ pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ timeout_suspend_us = 0;
+#endif
+#endif
+
+ for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+ dev = alloc_netdev(sizeof(struct rmnet_private),
+ "rmnet%d", rmnet_setup);
+
+ if (!dev) {
+ pr_err("%s: no memory for netdev %d\n", __func__, n);
+ return -ENOMEM;
+ }
+
+ netdevs[n] = dev;
+ d = &(dev->dev);
+ p = netdev_priv(dev);
+ /* Initial config uses Ethernet */
+ p->operation_mode = RMNET_MODE_LLP_ETH;
+ p->ch_id = n;
+ p->waiting_for_ul_skb = NULL;
+ p->in_reset = 0;
+ spin_lock_init(&p->lock);
+ spin_lock_init(&p->tx_queue_lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->timeout_us = timeout_us;
+ p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+ ret = register_netdev(dev);
+ if (ret) {
+			pr_err("%s: unable to register netdev %d rc=%d\n",
+				__func__, n, ret);
+ free_netdev(dev);
+ return ret;
+ }
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ if (device_create_file(d, &dev_attr_timeout))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_xmit))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_rcv))
+ continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (device_create_file(d, &dev_attr_timeout_suspend))
+ continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+ if (n == 0)
+ rmnet0 = d;
+#endif
+#endif
+ bam_rmnet_drivers[n].probe = bam_rmnet_probe;
+ bam_rmnet_drivers[n].remove = bam_rmnet_remove;
+ tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
+ if (tempname == NULL)
+ return -ENOMEM;
+ scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
+ n);
+ bam_rmnet_drivers[n].driver.name = tempname;
+ bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
+ ret = platform_driver_register(&bam_rmnet_drivers[n]);
+ if (ret) {
+ pr_err("%s: registration failed n=%d rc=%d\n",
+ __func__, n, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
+MODULE_LICENSE("GPL v2");
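
The CONFIG_MSM_RMNET_DEBUG attributes registered in rmnet_init() expose the wakeup accounting through sysfs. A minimal sketch for reading the counters and arming the timeout; the /sys/class/net/rmnet0/ paths are an assumption (the usual location for attributes created with device_create_file() on a registered net device), not something this patch states:

/* sketch: read rmnet0 wakeup counters and set the wakeup timeout */
#include <stdio.h>

static long read_counter(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	FILE *f;

	printf("wakeups_xmit=%ld\n",
	       read_counter("/sys/class/net/rmnet0/wakeups_xmit"));
	printf("wakeups_rcv=%ld\n",
	       read_counter("/sys/class/net/rmnet0/wakeups_rcv"));

	/* count a wakeup when packets arrive more than 500 ms apart
	   (the timeout attribute is in microseconds; 0 disables it) */
	f = fopen("/sys/class/net/rmnet0/timeout", "w");
	if (f) {
		fprintf(f, "500000");
		fclose(f);
	}
	return 0;
}
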
diff --git a/drivers/net/ethernet/msm/msm_rmnet_sdio.c b/drivers/net/ethernet/msm/msm_rmnet_sdio.c
new file mode 100644
index 0000000..14fb612
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_sdio.c
@@ -0,0 +1,712 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET SDIO Module.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/sdio_dmux.h>
+
+/* Debug message support */
+static int msm_rmnet_sdio_debug_mask;
+module_param_named(debug_enable, msm_rmnet_sdio_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do { \
+ if (msm_rmnet_sdio_debug_mask & m) \
+ pr_info(x); \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID -1
+
+#define DEVICE_INACTIVE 0
+#define DEVICE_ACTIVE 1
+
+#define HEADROOM_FOR_SDIO 8 /* for mux header */
+#define HEADROOM_FOR_QOS 8
+#define TAILROOM 8 /* for padding by mux layer */
+
+struct rmnet_private {
+ struct net_device_stats stats;
+ uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ ktime_t last_packet;
+ unsigned long wakeups_xmit;
+ unsigned long wakeups_rcv;
+ unsigned long timeout_us;
+#endif
+ struct sk_buff *skb;
+ spinlock_t lock;
+ spinlock_t tx_queue_lock;
+ struct tasklet_struct tsklt;
+ u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+ uint8_t device_up;
+ uint8_t in_reset;
+};
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
+ return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+ timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_suspend_us;
+ }
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_us;
+ }
+}
+
+static struct early_suspend rmnet_power_suspend = {
+ .suspend = rmnet_early_suspend,
+ .resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+ register_early_suspend(&rmnet_power_suspend);
+ return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+ int ret = 0;
+ ktime_t now;
+ if (p->timeout_us == 0) /* Check if disabled */
+ return 0;
+
+ /* Use real (wall) time. */
+ now = ktime_get_real();
+
+ if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+ ret = 1;
+
+ p->last_packet = now;
+ return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = simple_strtoul(buf, NULL, 10);
+#endif
+ return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ __be16 protocol = 0;
+
+ skb->dev = dev;
+
+ /* Determine L3 protocol */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+ dev->name, skb->data[0] & 0xf0);
+ /* skb will be dropped in upper layer for unknown protocol */
+ }
+ return protocol;
+}
+
+static int count_this_packet(void *_hdr, int len)
+{
+ struct ethhdr *hdr = _hdr;
+
+ if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+ return 0;
+
+ return 1;
+}
+
+static int sdio_update_reset_state(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int new_state;
+
+ new_state = msm_sdio_is_channel_in_reset(p->ch_id);
+
+ if (p->in_reset != new_state) {
+ p->in_reset = (uint8_t)new_state;
+
+ if (p->in_reset)
+ netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
+ return 1;
+ }
+ return 0;
+}
+
+/* Rx Callback, Called in Work Queue context */
+static void sdio_recv_notify(void *dev, struct sk_buff *skb)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ unsigned long flags;
+ u32 opmode;
+
+ if (skb) {
+ skb->dev = dev;
+ /* Handle Rx frame format */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_IP(opmode)) {
+ /* Driver in IP mode */
+ skb->protocol = rmnet_ip_type_trans(skb, dev);
+ } else {
+ /* Driver in Ethernet mode */
+ skb->protocol = eth_type_trans(skb, dev);
+ }
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += skb->len;
+ }
+ DBG1("[%s] Rx packet #%lu len=%d\n",
+ ((struct net_device *)dev)->name,
+ p->stats.rx_packets, skb->len);
+
+ /* Deliver to network stack */
+ netif_rx(skb);
+ } else {
+ spin_lock_irqsave(&p->lock, flags);
+ if (!sdio_update_reset_state((struct net_device *)dev))
+ pr_err("[%s] %s: No skb received",
+ ((struct net_device *)dev)->name, __func__);
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int sdio_ret;
+ struct QMI_QOS_HDR_S *qmih;
+ u32 opmode;
+ unsigned long flags;
+
+ if (!netif_carrier_ok(dev)) {
+ pr_err("[%s] %s: channel in reset",
+ dev->name, __func__);
+ goto xmit_out;
+ }
+
+ /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_QOS(opmode)) {
+ qmih = (struct QMI_QOS_HDR_S *)
+ skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+ qmih->version = 1;
+ qmih->flags = 0;
+ qmih->flow_id = skb->mark;
+ }
+
+ dev->trans_start = jiffies;
+ sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);
+
+ if (sdio_ret != 0) {
+ pr_err("[%s] %s: write returned error %d",
+ dev->name, __func__, sdio_ret);
+ goto xmit_out;
+ }
+
+ if (count_this_packet(skb->data, skb->len)) {
+ p->stats.tx_packets++;
+ p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+ }
+ DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+ return 0;
+xmit_out:
+ dev_kfree_skb_any(skb);
+ p->stats.tx_errors++;
+ return 0;
+}
+
+static void sdio_write_done(void *dev, struct sk_buff *skb)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ unsigned long flags;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ if (!p->in_reset) {
+ DBG1("%s: write complete skb=%p\n", __func__, skb);
+
+ spin_lock_irqsave(&p->tx_queue_lock, flags);
+ if (netif_queue_stopped(dev) &&
+ msm_sdio_dmux_is_ch_low(p->ch_id)) {
+			DBG0("%s: Low WM hit, waking queue, skb=%p\n",
+				__func__, skb);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&p->tx_queue_lock, flags);
+ } else {
+ DBG1("%s: write in reset skb=%p\n", __func__, skb);
+ }
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+ int r;
+ struct rmnet_private *p = netdev_priv(dev);
+
+ DBG0("[%s] __rmnet_open()\n", dev->name);
+
+ if (!p->device_up) {
+ r = msm_sdio_dmux_open(p->ch_id, dev,
+ sdio_recv_notify, sdio_write_done);
+
+ if (r < 0)
+ return -ENODEV;
+ }
+
+ p->device_up = DEVICE_ACTIVE;
+ return 0;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ DBG0("[%s] rmnet_open()\n", dev->name);
+
+ rc = __rmnet_open(dev);
+
+ if (rc == 0)
+ netif_start_queue(dev);
+
+ return rc;
+}
+
+static int __rmnet_close(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int rc = 0;
+
+ if (p->device_up) {
+ /* Do not close the rmnet port once it is up: closing causes
+ * the remote side to hang when the port is reopened. */
+ /* rc = msm_sdio_dmux_close(p->ch_id); */
+ p->device_up = DEVICE_INACTIVE;
+ return rc;
+ } else
+ return -EBADF;
+}
+
+static int rmnet_stop(struct net_device *dev)
+{
+ DBG0("[%s] rmnet_stop()\n", dev->name);
+
+ __rmnet_close(dev);
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
+ return -EINVAL;
+
+ DBG0("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ unsigned long flags;
+
+ if (netif_queue_stopped(dev)) {
+ pr_err("[%s]fatal: rmnet_xmit called when "
+ "netif_queue is stopped", dev->name);
+ return 0;
+ }
+
+ _rmnet_xmit(skb, dev);
+
+ spin_lock_irqsave(&p->tx_queue_lock, flags);
+ if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
+ netif_stop_queue(dev);
+ DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
+ }
+ spin_unlock_irqrestore(&p->tx_queue_lock, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+ pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_set_rx_mode = rmnet_set_multicast_list,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_set_rx_mode = rmnet_set_multicast_list,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 old_opmode = p->operation_mode;
+ unsigned long flags;
+ int prev_mtu = dev->mtu;
+ int rc = 0;
+
+ /* Process IOCTL command */
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */
+ /* Perform Ethernet config only if in IP mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_IP) {
+ ether_setup(dev);
+ random_ether_addr(dev->dev_addr);
+ dev->mtu = prev_mtu;
+
+ dev->netdev_ops = &rmnet_ops_ether;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_IP;
+ p->operation_mode |= RMNET_MODE_LLP_ETH;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): "
+ "set Ethernet protocol mode\n",
+ dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
+ /* Perform IP config only if in Ethernet mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+ /* Undo config done in ether_setup() */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = prev_mtu;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST|
+ IFF_MULTICAST);
+
+ dev->needed_headroom = HEADROOM_FOR_SDIO +
+ HEADROOM_FOR_QOS;
+ dev->needed_tailroom = TAILROOM;
+ dev->netdev_ops = &rmnet_ops_ip;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+ p->operation_mode |= RMNET_MODE_LLP_IP;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): "
+ "set IP protocol mode\n",
+ dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode &
+ (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+ break;
+
+ case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode |= RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode & RMNET_MODE_QOS);
+ break;
+
+ case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
+ ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+ break;
+
+ case RMNET_IOCTL_OPEN: /* Open transport port */
+ rc = __rmnet_open(dev);
+ DBG0("[%s] rmnet_ioctl(): open transport port\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_CLOSE: /* Close transport port */
+ rc = __rmnet_close(dev);
+ DBG0("[%s] rmnet_ioctl(): close transport port\n",
+ dev->name);
+ break;
+
+ default:
+ pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+ dev->name, cmd);
+ return -EINVAL;
+ }
+
+ DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+ dev->name, __func__, cmd, old_opmode, p->operation_mode);
+ return rc;
+}
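+
+/*
+ * Illustrative user-space usage of the ioctls above (a sketch, not
+ * part of the driver; assumes an fd from socket(AF_INET, SOCK_DGRAM, 0)
+ * and the RMNET_IOCTL_* codes from <linux/msm_rmnet.h>):
+ *
+ * struct ifreq ifr;
+ * memset(&ifr, 0, sizeof(ifr));
+ * strlcpy(ifr.ifr_name, "rmnet_sdio0", sizeof(ifr.ifr_name));
+ * ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr); switches to raw-IP framing;
+ * ioctl(fd, RMNET_IOCTL_SET_QOS_ENABLE, &ifr); enables QoS headers.
+ */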
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+ /* Using Ethernet mode by default */
+ dev->netdev_ops = &rmnet_ops_ether;
+ ether_setup(dev);
+
+ /* set this after calling ether_setup */
+ dev->mtu = RMNET_DATA_LEN;
+ dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS;
+ dev->needed_tailroom = TAILROOM;
+ random_ether_addr(dev->dev_addr);
+
+ dev->watchdog_timeo = 1000; /* 1000 jiffies: 10 seconds at HZ=100 */
+}
+
+static int __init rmnet_init(void)
+{
+ int ret;
+ struct device *d;
+ struct net_device *dev;
+ struct rmnet_private *p;
+ unsigned n;
+
+ pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ timeout_suspend_us = 0;
+#endif
+#endif
+
+ for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+ dev = alloc_netdev(sizeof(struct rmnet_private),
+ "rmnet_sdio%d", rmnet_setup);
+
+ if (!dev)
+ return -ENOMEM;
+
+ d = &(dev->dev);
+ p = netdev_priv(dev);
+ /* Initial config uses Ethernet */
+ p->operation_mode = RMNET_MODE_LLP_ETH;
+ p->ch_id = n;
+ spin_lock_init(&p->lock);
+ spin_lock_init(&p->tx_queue_lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->timeout_us = timeout_us;
+ p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+ ret = register_netdev(dev);
+ if (ret) {
+ free_netdev(dev);
+ return ret;
+ }
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ if (device_create_file(d, &dev_attr_timeout))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_xmit))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_rcv))
+ continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (device_create_file(d, &dev_attr_timeout_suspend))
+ continue;
+
+ /* Only care about rmnet0 for suspend/resume timeout hooks. */
+ if (n == 0)
+ rmnet0 = d;
+#endif
+#endif
+ }
+ return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/net/ethernet/msm/msm_rmnet_smux.c b/drivers/net/ethernet/msm/msm_rmnet_smux.c
new file mode 100644
index 0000000..fbb3489
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_smux.c
@@ -0,0 +1,938 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET SMUX Module.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+#include <linux/platform_device.h>
+#include <linux/smux.h>
+#include <linux/ip.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+/* Debug message support */
+static int msm_rmnet_smux_debug_mask;
+module_param_named(debug_enable, msm_rmnet_smux_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do { \
+ if (msm_rmnet_smux_debug_mask & m) \
+ pr_info(x); \
+} while (0)
+
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
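+
+/*
+ * Example: assuming the module is built as msm_rmnet_smux, writing 3
+ * to /sys/module/msm_rmnet_smux/parameters/debug_enable sets
+ * DEBUG_MASK_LVL0 | DEBUG_MASK_LVL1, enabling DBG0() and DBG1()
+ * messages while leaving DBG2() silent.
+ */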
+
+/* Configure device instances */
+#define RMNET_SMUX_DEVICE_COUNT (1)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID -1
+
+#define DEVICE_INACTIVE 0x00
+#define DEVICE_ACTIVE 0x01
+
+#define HEADROOM_FOR_SMUX 8 /* for mux header */
+#define HEADROOM_FOR_QOS 8
+#define TAILROOM 8 /* for padding by mux layer */
+
+struct rmnet_private {
+ struct net_device_stats stats;
+ uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ ktime_t last_packet;
+ unsigned long wakeups_xmit;
+ unsigned long wakeups_rcv;
+ unsigned long timeout_us;
+#endif
+ spinlock_t lock;
+ struct tasklet_struct tsklt;
+ /* IOCTL specified mode (protocol, QoS header) */
+ u32 operation_mode;
+ uint8_t device_state;
+ uint8_t in_reset;
+};
+
+static struct net_device *netdevs[RMNET_SMUX_DEVICE_COUNT];
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled we keep two timeout values: one used
+ * while the screen is on (the default) and one while it is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ timeout_suspend_us = simple_strtoul(buf, NULL, 10);
+ return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%lu\n",
+ (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+ timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_suspend_us;
+ }
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+ if (rmnet0) {
+ struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+ p->timeout_us = timeout_us;
+ }
+}
+
+static struct early_suspend rmnet_power_suspend = {
+ .suspend = rmnet_early_suspend,
+ .resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+ register_early_suspend(&rmnet_power_suspend);
+ return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+ int ret = 0;
+ ktime_t now;
+ if (p->timeout_us == 0) /* Check if disabled */
+ return 0;
+
+ /* Use real (wall) time. */
+ now = ktime_get_real();
+
+ if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+ ret = 1;
+
+ p->last_packet = now;
+ return ret;
+}
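+
+/*
+ * Worked example: with timeout_us = 500000, a packet arriving 700 ms
+ * after the previous one gives ktime_us_delta() = 700000 > 500000 and
+ * is counted as a wakeup; one arriving 100 ms later is not.
+ * last_packet advances in both cases.
+ */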
+
+static ssize_t wakeups_xmit_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ return snprintf(buf, PAGE_SIZE, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+ struct rmnet_private *p = netdev_priv(to_net_dev(d));
+ p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+ timeout_us = simple_strtoul(buf, NULL, 10);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+ return n;
+}
+
+static ssize_t timeout_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static int count_this_packet(void *_hdr, int len)
+{
+ struct ethhdr *hdr = _hdr;
+
+ if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+ return 0;
+
+ return 1;
+}
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ __be16 protocol = 0;
+
+ skb->dev = dev;
+
+ /* Determine L3 protocol */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+ dev->name, skb->data[0] & 0xf0);
+ /* skb will be dropped in upper layer for unknown protocol */
+ }
+ return protocol;
+}
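+
+/*
+ * Worked example: an IPv4 header starts with the version/IHL byte,
+ * typically 0x45, so 0x45 & 0xf0 = 0x40 selects ETH_P_IP; an IPv6
+ * header starts with 0x6X, so the mask yields 0x60 and ETH_P_IPV6.
+ */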
+
+static void smux_read_done(void *rcv_dev, const void *meta_data)
+{
+ struct rmnet_private *p;
+ struct net_device *dev = rcv_dev;
+ u32 opmode;
+ unsigned long flags;
+ struct sk_buff *skb = NULL;
+ const struct smux_meta_read *read_meta_info = meta_data;
+
+ if (!dev || !read_meta_info) {
+ DBG1("%s:invalid read_done callback recieved", __func__);
+ return;
+ }
+
+ p = netdev_priv(dev);
+
+ skb = (struct sk_buff *) read_meta_info->pkt_priv;
+
+ if (!skb || skb->dev != dev) {
+ DBG1("%s: ERR:skb pointer NULL in READ_DONE CALLBACK",
+ __func__);
+ return;
+ }
+
+ /* Handle Rx frame format */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_IP(opmode)) {
+ /* Driver in IP mode */
+ skb->protocol = rmnet_ip_type_trans(skb, dev);
+ } else {
+ /* Driver in Ethernet mode */
+ skb->protocol = eth_type_trans(skb, dev);
+ }
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+ p->stats.rx_packets++;
+ p->stats.rx_bytes += skb->len;
+ }
+ DBG2("[%s] Rx packet #%lu len=%d\n",
+ dev->name, p->stats.rx_packets,
+ skb->len);
+ /* Deliver to network stack */
+ netif_rx(skb);
+}
+
+static void smux_write_done(void *dev, const void *meta_data)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 opmode;
+ struct sk_buff *skb = NULL;
+ const struct smux_meta_write *write_meta_info = meta_data;
+ unsigned long flags;
+
+ if (!dev || !write_meta_info) {
+ DBG1("%s: ERR:invalid WRITE_DONE callback recieved", __func__);
+ return;
+ }
+
+ skb = (struct sk_buff *) write_meta_info->pkt_priv;
+
+ if (!skb) {
+ DBG1("%s: ERR:skb pointer NULL in WRITE_DONE"
+ " CALLBACK", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ DBG1("%s: write complete\n", __func__);
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+ p->stats.tx_packets++;
+ p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+ }
+ DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+ ((struct net_device *)(dev))->name, p->stats.tx_packets,
+ skb->len, skb->mark);
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(dev) &&
+ msm_smux_is_ch_low(p->ch_id)) {
+ DBG0("%s: Low WM hit, waking queue=%p\n",
+ __func__, skb);
+ netif_wake_queue(dev);
+ }
+}
+
+void rmnet_smux_notify(void *priv, int event_type, const void *metadata)
+{
+ struct rmnet_private *p;
+ struct net_device *dev;
+ unsigned long flags;
+ struct sk_buff *skb = NULL;
+ u32 opmode;
+ const struct smux_meta_disconnected *ssr_info;
+ const struct smux_meta_read *read_meta_info;
+ const struct smux_meta_write *write_meta_info = metadata;
+
+ if (!priv) {
+ DBG0("%s: priv(cookie) NULL, ignoring notification: %d\n",
+ __func__, event_type);
+ return;
+ }
+
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ p = netdev_priv(priv);
+ dev = priv;
+
+ DBG0("[%s] SMUX_CONNECTED event dev:%s\n", __func__, dev->name);
+
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+
+ spin_lock_irqsave(&p->lock, flags);
+ p->device_state = DEVICE_ACTIVE;
+ spin_unlock_irqrestore(&p->lock, flags);
+ break;
+
+ case SMUX_DISCONNECTED:
+ p = netdev_priv(priv);
+ dev = priv;
+ ssr_info = metadata;
+
+ DBG0("[%s] SMUX_DISCONNECTED event dev:%s\n",
+ __func__, dev->name);
+
+ if (ssr_info && ssr_info->is_ssr == 1)
+ DBG0("SSR detected on :%s\n", dev->name);
+
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ spin_lock_irqsave(&p->lock, flags);
+ p->device_state = DEVICE_INACTIVE;
+ spin_unlock_irqrestore(&p->lock, flags);
+ break;
+
+ case SMUX_READ_DONE:
+ smux_read_done(priv, metadata);
+ break;
+
+ case SMUX_READ_FAIL:
+ p = netdev_priv(priv);
+ dev = priv;
+ read_meta_info = metadata;
+
+ if (!dev || !read_meta_info) {
+ DBG1("%s: ERR:invalid read failed callback"
+ " recieved", __func__);
+ return;
+ }
+
+ skb = (struct sk_buff *) read_meta_info->pkt_priv;
+
+ if (!skb) {
+ DBG1("%s: ERR:skb pointer NULL in read fail"
+ " CALLBACK", __func__);
+ return;
+ }
+
+ DBG0("%s: read failed\n", __func__);
+
+ opmode = p->operation_mode;
+
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len))
+ p->stats.rx_dropped++;
+
+ dev_kfree_skb_any(skb);
+ break;
+
+ case SMUX_WRITE_DONE:
+ smux_write_done(priv, metadata);
+ break;
+
+ case SMUX_WRITE_FAIL:
+ p = netdev_priv(priv);
+ dev = priv;
+ write_meta_info = metadata;
+
+ if (!dev || !write_meta_info) {
+ DBG1("%s: ERR:invalid WRITE_DONE"
+ "callback recieved", __func__);
+ return;
+ }
+
+ skb = (struct sk_buff *) write_meta_info->pkt_priv;
+
+ if (!skb) {
+ DBG1("%s: ERR:skb pointer NULL in"
+ " WRITE_DONE CALLBACK", __func__);
+ return;
+ }
+
+ DBG0("%s: write failed\n", __func__);
+
+ opmode = p->operation_mode;
+
+ if (RMNET_IS_MODE_IP(opmode) ||
+ count_this_packet(skb->data, skb->len)) {
+ p->stats.tx_dropped++;
+ }
+
+ dev_kfree_skb_any(skb);
+ break;
+
+ case SMUX_LOW_WM_HIT:
+ dev = priv;
+ DBG0("[%s] Low WM hit dev:%s\n", __func__, dev->name);
+ netif_start_queue(dev);
+ break;
+
+ case SMUX_HIGH_WM_HIT:
+ dev = priv;
+ DBG0("[%s] Low WM hit dev:%s\n", __func__, dev->name);
+ netif_stop_queue(dev);
+ break;
+
+ default:
+ dev = priv;
+ DBG0("[%s] Invalid event:%d received on"
+ " dev: %s\n", __func__, event_type, dev->name);
+ break;
+ }
+}
+
+int get_rx_buffers(void *priv, void **pkt_priv, void **buffer, int size)
+{
+ struct net_device *dev = (struct net_device *) priv;
+ struct sk_buff *skb = NULL;
+ void *ptr = NULL;
+
+ DBG0("[%s] dev:%s\n", __func__, dev->name);
+ skb = __dev_alloc_skb(size, GFP_ATOMIC);
+ if (skb == NULL) {
+ DBG0("%s: unable to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* TODO skb_reserve(skb, NET_IP_ALIGN); for ethernet mode */
+ /* Populate some params now. */
+ skb->dev = dev;
+ ptr = skb_put(skb, size);
+
+ skb_set_network_header(skb, 0);
+
+ /* done with skb setup, return the buffer pointer. */
+ *pkt_priv = skb;
+ *buffer = ptr;
+
+ return 0;
+}
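+
+/*
+ * The skb allocated here round-trips through SMUX as pkt_priv: on a
+ * successful read it reappears in smux_read_done() via
+ * read_meta_info->pkt_priv, already holding the received bytes, and on
+ * failure it comes back through the SMUX_READ_FAIL path, which frees it.
+ */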
+
+static int __rmnet_open(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+
+ DBG0("[%s] __rmnet_open()\n", dev->name);
+
+ if (p->device_state == DEVICE_ACTIVE) {
+ return 0;
+ } else {
+ DBG0("[%s] Platform inactive\n", dev->name);
+ return -ENODEV;
+ }
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+ int rc = 0;
+
+ DBG0("[%s] rmnet_open()\n", dev->name);
+
+ rc = __rmnet_open(dev);
+
+ if (rc == 0)
+ netif_start_queue(dev);
+
+ return rc;
+}
+
+static int rmnet_stop(struct net_device *dev)
+{
+ DBG0("[%s] rmnet_stop()\n", dev->name);
+
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
+ return -EINVAL;
+
+ DBG0("[%s] MTU change: old=%d new=%d\n",
+ dev->name, dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int smux_ret;
+ struct QMI_QOS_HDR_S *qmih;
+ u32 opmode;
+ unsigned long flags;
+
+ /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+ spin_lock_irqsave(&p->lock, flags);
+ opmode = p->operation_mode;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (RMNET_IS_MODE_QOS(opmode)) {
+ qmih = (struct QMI_QOS_HDR_S *)
+ skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+ qmih->version = 1;
+ qmih->flags = 0;
+ qmih->flow_id = skb->mark;
+ }
+
+ dev->trans_start = jiffies;
+
+ /* if write() succeeds, skb access is unsafe in this process */
+ smux_ret = msm_smux_write(p->ch_id, skb, skb->data, skb->len);
+
+ if (smux_ret != 0 && smux_ret != -EAGAIN) {
+ pr_err("[%s] %s: write returned error %d",
+ dev->name, __func__, smux_ret);
+ return -EPERM;
+ }
+
+ return smux_ret;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_queue_stopped(dev) || (p->device_state == DEVICE_INACTIVE)) {
+ pr_err("[%s] fatal: rmnet_xmit called when queue "
+ "is stopped or device is inactive", dev->name);
+ /* returning 0 (NETDEV_TX_OK) claims the skb, so free it */
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
+ ret = _rmnet_xmit(skb, dev);
+
+ if (ret == -EPERM) {
+ /* Do not stop the queue here; doing so can lead
+ * to an irrecoverable state.
+ */
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+
+ if (msm_smux_is_ch_full(p->ch_id) || (ret == -EAGAIN)) {
+ /*
+ * EAGAIN means the write would overflow the high watermark,
+ * so the queue should already have been stopped. Stop it now
+ * and return BUSY to the networking core; it will retry the
+ * packet once the queue is restarted, which happens when the
+ * low-watermark notification arrives.
+ */
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto exit;
+ }
+exit:
+ return ret;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ return &p->stats;
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+ pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+ .ndo_open = rmnet_open,
+ .ndo_stop = rmnet_stop,
+ .ndo_start_xmit = rmnet_xmit,
+ .ndo_get_stats = rmnet_get_stats,
+ .ndo_tx_timeout = rmnet_tx_timeout,
+ .ndo_do_ioctl = rmnet_ioctl,
+ .ndo_change_mtu = rmnet_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rmnet_private *p = netdev_priv(dev);
+ u32 old_opmode = p->operation_mode;
+ unsigned long flags;
+ int prev_mtu = dev->mtu;
+ int rc = 0;
+
+ /* Process IOCTL command */
+ switch (cmd) {
+ case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */
+ /* Perform Ethernet config only if in IP mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_IP) {
+ ether_setup(dev);
+ random_ether_addr(dev->dev_addr);
+ dev->mtu = prev_mtu;
+
+ dev->netdev_ops = &rmnet_ops_ether;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_IP;
+ p->operation_mode |= RMNET_MODE_LLP_ETH;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): "
+ "set Ethernet protocol mode\n",
+ dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
+ /* Perform IP config only if in Ethernet mode currently*/
+ if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+ /* Undo config done in ether_setup() */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->mtu = prev_mtu;
+ dev->addr_len = 0;
+ dev->flags &= ~(IFF_BROADCAST |
+ IFF_MULTICAST);
+
+ dev->needed_headroom = HEADROOM_FOR_SMUX +
+ HEADROOM_FOR_QOS;
+ dev->needed_tailroom = TAILROOM;
+ dev->netdev_ops = &rmnet_ops_ip;
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+ p->operation_mode |= RMNET_MODE_LLP_IP;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): "
+ "set IP protocol mode\n",
+ dev->name);
+ }
+ break;
+
+ case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode &
+ (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+ break;
+
+ case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode |= RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
+ spin_lock_irqsave(&p->lock, flags);
+ p->operation_mode &= ~RMNET_MODE_QOS;
+ spin_unlock_irqrestore(&p->lock, flags);
+ DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ ifr->ifr_ifru.ifru_data =
+ (void *)(p->operation_mode & RMNET_MODE_QOS);
+ break;
+
+ case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
+ ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+ break;
+
+ case RMNET_IOCTL_OPEN: /* Open transport port */
+ rc = __rmnet_open(dev);
+ DBG0("[%s] rmnet_ioctl(): open transport port\n",
+ dev->name);
+ break;
+
+ case RMNET_IOCTL_CLOSE: /* Close transport port */
+ DBG0("[%s] rmnet_ioctl(): close transport port\n",
+ dev->name);
+ break;
+
+ default:
+ pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+ dev->name, cmd);
+ return -EINVAL;
+ }
+
+ DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+ dev->name, __func__, cmd, old_opmode, p->operation_mode);
+ return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+ /* Using Ethernet mode by default */
+ dev->netdev_ops = &rmnet_ops_ether;
+ ether_setup(dev);
+
+ /* set this after calling ether_setup */
+ dev->mtu = RMNET_DATA_LEN;
+ dev->needed_headroom = HEADROOM_FOR_SMUX + HEADROOM_FOR_QOS;
+ dev->needed_tailroom = TAILROOM;
+ random_ether_addr(dev->dev_addr);
+
+ dev->watchdog_timeo = 1000; /* 1000 jiffies: 10 seconds at HZ=100 */
+}
+
+static int smux_rmnet_probe(struct platform_device *pdev)
+{
+ int i;
+ int r;
+ struct rmnet_private *p;
+
+ for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
+ p = netdev_priv(netdevs[i]);
+
+ if ((p != NULL) && (p->device_state == DEVICE_INACTIVE)) {
+ r = msm_smux_open(p->ch_id,
+ netdevs[i],
+ rmnet_smux_notify,
+ get_rx_buffers);
+
+ if (r < 0) {
+ DBG0("%s: ch=%d open failed with rc %d\n",
+ __func__, p->ch_id, r);
+ }
+ }
+ }
+ return 0;
+}
+
+static int smux_rmnet_remove(struct platform_device *pdev)
+{
+ int i;
+ int r;
+ struct rmnet_private *p;
+
+ for (i = 0; i < RMNET_SMUX_DEVICE_COUNT; i++) {
+ p = netdev_priv(netdevs[i]);
+
+ if ((p != NULL) && (p->device_state == DEVICE_ACTIVE)) {
+ r = msm_smux_close(p->ch_id);
+
+ if (r < 0) {
+ DBG0("%s: ch=%d close failed with rc %d\n",
+ __func__, p->ch_id, r);
+ continue;
+ }
+ netif_carrier_off(netdevs[i]);
+ netif_stop_queue(netdevs[i]);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver smux_rmnet_driver = {
+ .probe = smux_rmnet_probe,
+ .remove = smux_rmnet_remove,
+ .driver = {
+ .name = "SMUX_RMNET",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rmnet_init(void)
+{
+ int ret;
+ struct device *d;
+ struct net_device *dev;
+ struct rmnet_private *p;
+ unsigned n;
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ timeout_suspend_us = 0;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+ for (n = 0; n < RMNET_SMUX_DEVICE_COUNT; n++) {
+ dev = alloc_netdev(sizeof(struct rmnet_private),
+ "rmnet_smux%d", rmnet_setup);
+
+ if (!dev) {
+ pr_err("%s: no memory for netdev %d\n", __func__, n);
+ return -ENOMEM;
+ }
+
+ netdevs[n] = dev;
+ d = &(dev->dev);
+ p = netdev_priv(dev);
+ /* Initial config uses Ethernet */
+ p->operation_mode = RMNET_MODE_LLP_ETH;
+ p->ch_id = n;
+ p->in_reset = 0;
+ spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ p->timeout_us = timeout_us;
+ p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("%s: unable to register netdev"
+ " %d rc=%d\n", __func__, n, ret);
+ free_netdev(dev);
+ return ret;
+ }
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+ if (device_create_file(d, &dev_attr_timeout))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_xmit))
+ continue;
+ if (device_create_file(d, &dev_attr_wakeups_rcv))
+ continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ if (device_create_file(d, &dev_attr_timeout_suspend))
+ continue;
+
+ /* Only care about rmnet0 for suspend/resume timeout hooks. */
+ if (n == 0)
+ rmnet0 = d;
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+#endif /* CONFIG_MSM_RMNET_DEBUG */
+
+ }
+
+ ret = platform_driver_register(&smux_rmnet_driver);
+ if (ret) {
+ pr_err("%s: registration failed n=%d rc=%d\n",
+ __func__, n, ret);
+ return ret;
+ }
+ return 0;
+}
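+
+/*
+ * Init/probe split: rmnet_init() registers all the net devices up
+ * front, but the SMUX channels are only opened from smux_rmnet_probe()
+ * once the "SMUX_RMNET" platform device appears, i.e. once the
+ * underlying SMUX transport is ready.
+ */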
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET SMUX TRANSPORT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/msm/qfec.c b/drivers/net/ethernet/msm/qfec.c
new file mode 100644
index 0000000..112e16a
--- /dev/null
+++ b/drivers/net/ethernet/msm/qfec.c
@@ -0,0 +1,2792 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <linux/platform_device.h>
+
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h> /* mark_bh */
+
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/mii.h>
+
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/inet.h>
+
+#include "qfec.h"
+
+#define QFEC_NAME "qfec"
+#define QFEC_DRV_VER "Nov 29 2011"
+
+#define ETH_BUF_SIZE 0x600
+#define MAX_N_BD 50
+#define MAC_ADDR_SIZE 6
+
+#define RX_TX_BD_RATIO 8
+#define TX_BD_NUM 256
+#define RX_BD_NUM 256
+#define TX_BD_TI_RATIO 4
+#define MAX_MDIO_REG 32
+
+#define H_DPLX 0
+#define F_DPLX 1
+/*
+ * logging macros
+ */
+#define QFEC_LOG_PR 1
+#define QFEC_LOG_DBG 2
+#define QFEC_LOG_DBG2 4
+#define QFEC_LOG_MDIO_W 8
+#define QFEC_LOG_MDIO_R 16
+#define QFEC_MII_EXP_MASK (EXPANSION_LCWP | EXPANSION_ENABLENPAGE \
+ | EXPANSION_NPCAPABLE)
+
+static int qfec_debug = QFEC_LOG_PR;
+
+#ifdef QFEC_DEBUG
+# define QFEC_LOG(flag, ...) \
+ do { \
+ if (flag & qfec_debug) \
+ pr_info(__VA_ARGS__); \
+ } while (0)
+#else
+# define QFEC_LOG(flag, ...)
+#endif
+
+#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
+
+/*
+ * driver buffer-descriptor
+ * contains the 4 word HW descriptor plus an additional 4-words.
+ * (See the DSL bits in the BUS-Mode register).
+ */
+#define BD_FLAG_LAST_BD 1
+
+struct buf_desc {
+ struct qfec_buf_desc *p_desc;
+ struct sk_buff *skb;
+ void *buf_virt_addr;
+ void *buf_phys_addr;
+ uint32_t last_bd_flag;
+};
+
+/*
+ *inline functions accessing non-struct qfec_buf_desc elements
+ */
+
+/* skb */
+static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
+{
+ return p_bd->skb;
+};
+
+static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
+{
+ p_bd->skb = p;
+};
+
+/* virtual addr */
+static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
+{
+ p_bd->buf_virt_addr = addr;
+};
+
+static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
+{
+ return p_bd->buf_virt_addr;
+};
+
+/* physical addr */
+static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
+{
+ p_bd->buf_phys_addr = addr;
+};
+
+static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
+{
+ return p_bd->buf_phys_addr;
+};
+
+/* last_bd_flag */
+static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
+{
+ return (p_bd->last_bd_flag != 0);
+};
+
+static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
+{
+ p_bd->last_bd_flag = BD_FLAG_LAST_BD;
+};
+
+/*
+ *inline functions accessing struct qfec_buf_desc elements
+ */
+
+/* ownership bit */
+static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
+{
+ return p_bd->p_desc->status & BUF_OWN;
+};
+
+static inline void qfec_bd_own_set(struct buf_desc *p_bd)
+{
+ p_bd->p_desc->status |= BUF_OWN;
+};
+
+static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
+{
+ p_bd->p_desc->status &= ~(BUF_OWN);
+};
+
+static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
+{
+ return p_bd->p_desc->status;
+};
+
+static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
+{
+ p_bd->p_desc->status = status;
+};
+
+static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
+{
+ return BUF_RX_FL_GET((*p_bd->p_desc));
+};
+
+/* control register */
+static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
+{
+ p_bd->p_desc->ctl = 0;
+};
+
+static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
+{
+ return p_bd->p_desc->ctl;
+};
+
+static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
+{
+ p_bd->p_desc->ctl |= val;
+};
+
+static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
+{
+ p_bd->p_desc->ctl = val;
+};
+
+/* pbuf register */
+static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
+{
+ return p_bd->p_desc->p_buf;
+}
+
+static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
+{
+ p_bd->p_desc->p_buf = p;
+}
+
+/* next register */
+static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
+{
+ return p_bd->p_desc->next;
+};
+
+/*
+ * initialize an RX BD w/ a new buf
+ */
+static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
+{
+ struct sk_buff *skb;
+ void *p;
+ void *v;
+
+ /* allocate and record ptrs for sk buff */
+ skb = dev_alloc_skb(ETH_BUF_SIZE);
+ if (!skb)
+ goto err;
+
+ qfec_bd_skbuf_set(p_bd, skb);
+
+ v = skb_put(skb, ETH_BUF_SIZE);
+ qfec_bd_virt_set(p_bd, v);
+
+ p = (void *) dma_map_single(&dev->dev,
+ (void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
+ qfec_bd_pbuf_set(p_bd, p);
+ qfec_bd_phys_set(p_bd, p);
+
+ /* populate control register */
+ /* mark the last BD and set end-of-ring bit */
+ qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
+ (qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));
+
+ qfec_bd_status_set(p_bd, BUF_OWN);
+
+ if (!(qfec_debug & QFEC_LOG_DBG2))
+ return 0;
+
+ /* debug messages */
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);
+
+ QFEC_LOG(QFEC_LOG_DBG2,
+ "%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
+ __func__, (void *)p_bd,
+ (void *)skb->data, v,
+ (void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
+ (void *)p);
+
+ return 0;
+
+err:
+ return -ENOMEM;
+};
+
+/*
+ * ring structure used to maintain indices of buffer-descriptor (BD) usage
+ *
+ * The RX BDs are normally all pre-allocated with buffers available to be
+ * DMA'd into with received frames. The head indicates the first BD/buffer
+ * containing a received frame, and the tail indicates the oldest BD/buffer
+ * that needs to be restored for use. Head and tail are both initialized
+ * to zero, and n_free is initialized to zero, since all BD are initialized.
+ *
+ * The TX BDs are normally available for use, only being initialized as
+ * TX frames are requested for transmission. The head indicates the
+ * first available BD, and the tail indicate the oldest BD that has
+ * not been acknowledged as transmitted. Head and tail are both initialized
+ * to zero, and n_free is initialized to len, since all are available for use.
+ */
+struct ring {
+ int head;
+ int tail;
+ int n_free;
+ int len;
+};
+
+/* accessory in line functions for struct ring */
+static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
+{
+ p_ring->head = p_ring->tail = 0;
+ p_ring->len = size;
+ p_ring->n_free = free;
+}
+
+static inline int qfec_ring_full(struct ring *p_ring)
+{
+ return (p_ring->n_free == 0);
+};
+
+static inline int qfec_ring_empty(struct ring *p_ring)
+{
+ return (p_ring->n_free == p_ring->len);
+}
+
+static inline void qfec_ring_head_adv(struct ring *p_ring)
+{
+ if (++p_ring->head == p_ring->len)
+ p_ring->head = 0;
+ p_ring->n_free--;
+};
+
+static inline void qfec_ring_tail_adv(struct ring *p_ring)
+{
+ if (++p_ring->tail == p_ring->len)
+ p_ring->tail = 0;
+ p_ring->n_free++;
+};
+
+static inline int qfec_ring_head(struct ring *p_ring)
+{
+ return p_ring->head;
+};
+
+static inline int qfec_ring_tail(struct ring *p_ring)
+{
+ return p_ring->tail;
+};
+
+static inline int qfec_ring_room(struct ring *p_ring)
+{
+ return p_ring->n_free;
+};
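+
+/*
+ * Typical TX usage of the ring helpers (an illustrative sketch, not
+ * driver code):
+ *
+ * qfec_ring_init(&ring, TX_BD_NUM, TX_BD_NUM); all BDs free
+ * if (!qfec_ring_full(&ring)) { on xmit: claim the BD at head
+ * n = qfec_ring_head(&ring);
+ * qfec_ring_head_adv(&ring);
+ * }
+ * ... later, on TX-done: reclaim the oldest BD at tail
+ * n = qfec_ring_tail(&ring);
+ * qfec_ring_tail_adv(&ring);
+ */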
+
+/*
+ * counters track normal and abnormal driver events and activity
+ */
+enum cntr {
+ isr = 0,
+ fatal_bus,
+
+ early_tx,
+ tx_no_resource,
+ tx_proc_stopped,
+ tx_jabber_tmout,
+
+ xmit,
+ tx_int,
+ tx_isr,
+ tx_owned,
+ tx_underflow,
+
+ tx_replenish,
+ tx_skb_null,
+ tx_timeout,
+ tx_too_large,
+
+ gmac_isr,
+
+ /* half */
+ norm_int,
+ abnorm_int,
+
+ early_rx,
+ rx_buf_unavail,
+ rx_proc_stopped,
+ rx_watchdog,
+
+ netif_rx_cntr,
+ rx_int,
+ rx_isr,
+ rx_owned,
+ rx_overflow,
+
+ rx_dropped,
+ rx_skb_null,
+ queue_start,
+ queue_stop,
+
+ rx_paddr_nok,
+ ts_ioctl,
+ ts_tx_en,
+ ts_tx_rtn,
+
+ ts_rec,
+ cntr_last,
+};
+
+static char *cntr_name[] = {
+ "isr",
+ "fatal_bus",
+
+ "early_tx",
+ "tx_no_resource",
+ "tx_proc_stopped",
+ "tx_jabber_tmout",
+
+ "xmit",
+ "tx_int",
+ "tx_isr",
+ "tx_owned",
+ "tx_underflow",
+
+ "tx_replenish",
+ "tx_skb_null",
+ "tx_timeout",
+ "tx_too_large",
+
+ "gmac_isr",
+
+ /* half */
+ "norm_int",
+ "abnorm_int",
+
+ "early_rx",
+ "rx_buf_unavail",
+ "rx_proc_stopped",
+ "rx_watchdog",
+
+ "netif_rx",
+ "rx_int",
+ "rx_isr",
+ "rx_owned",
+ "rx_overflow",
+
+ "rx_dropped",
+ "rx_skb_null",
+ "queue_start",
+ "queue_stop",
+
+ "rx_paddr_nok",
+ "ts_ioctl",
+ "ts_tx_en",
+ "ts_tx_rtn",
+
+ "ts_rec",
+ ""
+};
+
+/*
+ * private data
+ */
+
+static struct net_device *qfec_dev;
+
+enum qfec_state {
+ timestamping = 0x04,
+};
+
+struct qfec_priv {
+ struct net_device *net_dev;
+ struct net_device_stats stats; /* req statistics */
+
+ struct device dev;
+
+ spinlock_t xmit_lock;
+ spinlock_t mdio_lock;
+
+ unsigned int state; /* driver state */
+
+ unsigned int bd_size; /* buf-desc alloc size */
+ struct qfec_buf_desc *bd_base; /* ptr to qfec-buf-desc */
+ dma_addr_t tbd_dma; /* dma/phy-addr buf-desc */
+ dma_addr_t rbd_dma; /* dma/phy-addr buf-desc */
+
+ struct resource *mac_res;
+ void *mac_base; /* mac (virt) base address */
+
+ struct resource *clk_res;
+ void *clk_base; /* clk (virt) base address */
+
+ struct resource *fuse_res;
+ void *fuse_base; /* mac addr fuses */
+
+ unsigned int n_tbd; /* # of TX buf-desc */
+ struct ring ring_tbd; /* TX ring */
+ struct buf_desc *p_tbd;
+ unsigned int tx_ic_mod; /* (%) val for setting IC */
+
+ unsigned int n_rbd; /* # of RX buf-desc */
+ struct ring ring_rbd; /* RX ring */
+ struct buf_desc *p_rbd;
+
+ struct buf_desc *p_latest_rbd;
+ struct buf_desc *p_ending_rbd;
+
+ unsigned long cntr[cntr_last]; /* activity counters */
+
+ struct mii_if_info mii; /* used by mii lib */
+
+ int mdio_clk; /* phy mdio clock rate */
+ int phy_id; /* default PHY addr (0) */
+ struct timer_list phy_tmr; /* monitor PHY state */
+};
+
+/*
+ * cntrs display
+ */
+
+static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int h = (cntr_last + 1) / 2;
+ int l;
+ int n;
+ int count = PAGE_SIZE;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+ l = snprintf(&buf[0], count, "%s:\n", __func__);
+ for (n = 0; n < h; n++) {
+ l += snprintf(&buf[l], count - l,
+ " %12lu %-16s %12lu %s\n",
+ priv->cntr[n], cntr_name[n],
+ priv->cntr[n+h], cntr_name[n+h]);
+ }
+
+ return l;
+}
+
+# define CNTR_INC(priv, name) (priv->cntr[name]++)
+
+/*
+ * functions that manage state
+ */
+static inline void qfec_queue_start(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ if (netif_queue_stopped(dev)) {
+ netif_wake_queue(dev);
+ CNTR_INC(priv, queue_start);
+ }
+};
+
+static inline void qfec_queue_stop(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ CNTR_INC(priv, queue_stop);
+};
+
+/*
+ * functions to access and initialize the MAC registers
+ */
+static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
+{
+ return ioread32((void *) (priv->mac_base + reg));
+}
+
+static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
+{
+ uint32_t addr = (uint32_t)priv->mac_base + reg;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
+ iowrite32(val, (void *)addr);
+}
+
+/*
+ * speed/duplex/pause settings
+ */
+static int qfec_config_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int cfg = qfec_reg_read(priv, MAC_CONFIG_REG);
+ int flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
+ int l = 0;
+ int count = PAGE_SIZE;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+ l += snprintf(&buf[l], count, "%s:", __func__);
+
+ l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg,
+ (cfg & MAC_CONFIG_REG_PS)
+ ? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
+ cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
+ cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");
+
+ flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
+ l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow,
+ (flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
+ : ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
+ : ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));
+
+ l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
+ l += snprintf(&buf[l], count - l, "\n");
+ return l;
+}
+
+/*
+ * table and functions to initialize controller registers
+ */
+
+struct reg_entry {
+ unsigned int rdonly;
+ unsigned int addr;
+ char *label;
+ unsigned int val;
+};
+
+static struct reg_entry qfec_reg_tbl[] = {
+ { 0, BUS_MODE_REG, "BUS_MODE_REG", BUS_MODE_REG_DEFAULT },
+ { 0, AXI_BUS_MODE_REG, "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
+ { 0, AXI_STATUS_REG, "AXI_STATUS_REG", 0 },
+
+ { 0, MAC_ADR_0_HIGH_REG, "MAC_ADR_0_HIGH_REG", 0x00000302 },
+ { 0, MAC_ADR_0_LOW_REG, "MAC_ADR_0_LOW_REG", 0x01350702 },
+
+ { 1, RX_DES_LST_ADR_REG, "RX_DES_LST_ADR_REG", 0 },
+ { 1, TX_DES_LST_ADR_REG, "TX_DES_LST_ADR_REG", 0 },
+ { 1, STATUS_REG, "STATUS_REG", 0 },
+ { 1, DEBUG_REG, "DEBUG_REG", 0 },
+
+ { 0, INTRP_EN_REG, "INTRP_EN_REG", QFEC_INTRP_SETUP},
+
+ { 1, CUR_HOST_TX_DES_REG, "CUR_HOST_TX_DES_REG", 0 },
+ { 1, CUR_HOST_RX_DES_REG, "CUR_HOST_RX_DES_REG", 0 },
+ { 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
+ { 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },
+
+ { 1, MAC_FR_FILTER_REG, "MAC_FR_FILTER_REG", 0 },
+
+ { 0, MAC_CONFIG_REG, "MAC_CONFIG_REG", MAC_CONFIG_REG_SPD_1G
+ | MAC_CONFIG_REG_DM
+ | MAC_CONFIG_REG_TE
+ | MAC_CONFIG_REG_RE
+ | MAC_CONFIG_REG_IPC },
+
+ { 1, INTRP_STATUS_REG, "INTRP_STATUS_REG", 0 },
+ { 1, INTRP_MASK_REG, "INTRP_MASK_REG", 0 },
+
+ { 0, OPER_MODE_REG, "OPER_MODE_REG", OPER_MODE_REG_DEFAULT },
+
+ { 1, GMII_ADR_REG, "GMII_ADR_REG", 0 },
+ { 1, GMII_DATA_REG, "GMII_DATA_REG", 0 },
+
+ { 0, MMC_INTR_MASK_RX_REG, "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF },
+ { 0, MMC_INTR_MASK_TX_REG, "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF },
+
+ { 1, TS_HIGH_REG, "TS_HIGH_REG", 0 },
+ { 1, TS_LOW_REG, "TS_LOW_REG", 0 },
+
+ { 1, TS_HI_UPDT_REG, "TS_HI_UPDATE_REG", 0 },
+ { 1, TS_LO_UPDT_REG, "TS_LO_UPDATE_REG", 0 },
+ { 0, TS_SUB_SEC_INCR_REG, "TS_SUB_SEC_INCR_REG", 1 },
+ { 0, TS_CTL_REG, "TS_CTL_REG", TS_CTL_TSENALL
+ | TS_CTL_TSCTRLSSR
+ | TS_CTL_TSINIT
+ | TS_CTL_TSENA },
+};
+
+static void qfec_reg_init(struct qfec_priv *priv)
+{
+ struct reg_entry *p = qfec_reg_tbl;
+ int n = ARRAY_SIZE(qfec_reg_tbl);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ for (; n--; p++) {
+ if (!p->rdonly)
+ qfec_reg_write(priv, p->addr, p->val);
+ }
+}
+
+/*
+ * display registers thru sysfs
+ */
+static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ struct reg_entry *p = qfec_reg_tbl;
+ int n = ARRAY_SIZE(qfec_reg_tbl);
+ int l = 0;
+ int count = PAGE_SIZE;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+ for (; n--; p++) {
+ l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n",
+ (void *)priv->mac_base + p->addr, p->addr,
+ qfec_reg_read(priv, p->addr), p->label);
+ }
+
+ return l;
+}
+
+/*
+ * set the MAC-0 address
+ */
+static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
+{
+ uint32_t h = 0;
+ uint32_t l = 0;
+
+ h = h << 8 | addr[5];
+ h = h << 8 | addr[4];
+
+ l = l << 8 | addr[3];
+ l = l << 8 | addr[2];
+ l = l << 8 | addr[1];
+ l = l << 8 | addr[0];
+
+ qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
+ qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
+}
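+
+/*
+ * Worked example: for addr = 00:a0:c6:11:22:33 (addr[0] = 0x00 ...
+ * addr[5] = 0x33) the shifts above yield h = 0x00003322 and
+ * l = 0x11c6a000, i.e. the address bytes are packed little-endian
+ * across the LOW/HIGH register pair.
+ */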
+
+/*
+ * set up the RX filter
+ */
+static void qfec_set_rx_mode(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ uint32_t filter_conf;
+ int index;
+
+ /* Clear address filter entries */
+ for (index = 1; index < MAC_ADR_MAX; ++index) {
+ qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), 0);
+ qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), 0);
+ }
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Receive all frames */
+ filter_conf = MAC_FR_FILTER_RA;
+ } else if ((dev->flags & IFF_MULTICAST) == 0) {
+ /* Unicast filtering only */
+ filter_conf = MAC_FR_FILTER_HPF;
+ } else if ((netdev_mc_count(dev) > MAC_ADR_MAX - 1) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Unicast filtering is enabled, Pass all multicast frames */
+ filter_conf = MAC_FR_FILTER_HPF | MAC_FR_FILTER_PM;
+ } else {
+ struct netdev_hw_addr *ha;
+
+ /* Both unicast and multicast filtering are enabled */
+ filter_conf = MAC_FR_FILTER_HPF;
+
+ index = 1;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ uint32_t high, low;
+
+ high = (1 << 31) | (ha->addr[5] << 8) | (ha->addr[4]);
+ low = (ha->addr[3] << 24) | (ha->addr[2] << 16) |
+ (ha->addr[1] << 8) | (ha->addr[0]);
+
+ qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), high);
+ qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), low);
+
+ index++;
+ }
+ }
+
+ qfec_reg_write(priv, MAC_FR_FILTER_REG, filter_conf);
+}
+
+/*
+ * reset the controller
+ */
+
+#define QFEC_RESET_TIMEOUT 10000
+ /* Reset should always clear, but did not without a test/delay
+ * in RGMII mode; there is no specified maximum timeout.
+ */
+
+static int qfec_hw_reset(struct qfec_priv *priv)
+{
+ int timeout = QFEC_RESET_TIMEOUT;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);
+
+ while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
+ if (timeout-- == 0) {
+ QFEC_LOG_ERR("%s: timeout\n", __func__);
+ return -ETIME;
+ }
+
+ /* there were problems resetting the controller
+ * in RGMII mode when there wasn't sufficient
+ * delay between register reads
+ */
+ usleep_range(100, 200);
+ }
+
+ return 0;
+}
+
+/*
+ * initialize controller
+ */
+static int qfec_hw_init(struct qfec_priv *priv)
+{
+ int res = 0;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ res = qfec_hw_reset(priv);
+ if (res)
+ return res;
+
+ qfec_reg_init(priv);
+
+ /* config buf-desc locations */
+ qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
+ qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);
+
+ /* clear interrupts */
+ qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
+ | INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);
+
+ if (priv->mii.supports_gmii) {
+ /* Clear RGMII */
+ qfec_reg_read(priv, SG_RG_SMII_STATUS_REG);
+ /* Disable RGMII int */
+ qfec_reg_write(priv, INTRP_MASK_REG, 1);
+ }
+
+ return res;
+}
+
+/*
+ * en/disable controller
+ */
+static void qfec_hw_enable(struct qfec_priv *priv)
+{
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ qfec_reg_write(priv, OPER_MODE_REG,
+ qfec_reg_read(priv, OPER_MODE_REG)
+ | OPER_MODE_REG_ST | OPER_MODE_REG_SR);
+}
+
+static void qfec_hw_disable(struct qfec_priv *priv)
+{
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ qfec_reg_write(priv, OPER_MODE_REG,
+ qfec_reg_read(priv, OPER_MODE_REG)
+ & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
+}
+
+/*
+ * interface selection
+ */
+struct intf_config {
+ uint32_t intf_sel;
+ uint32_t emac_ns;
+ uint32_t eth_x_en_ns;
+ uint32_t clkmux_sel;
+};
+
+#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
+#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)
+
+static struct intf_config intf_config_tbl[] = {
+ { EMAC_PHY_INTF_SEL_MII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
+ { EMAC_PHY_INTF_SEL_RGMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
+ { EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
+ CLKMUX_REVMII }
+};
+
+/*
+ * emac clk register read and write functions
+ */
+static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
+{
+ return ioread32((void *) (priv->clk_base + reg));
+}
+
+static inline void qfec_clkreg_write(struct qfec_priv *priv,
+ uint32_t reg, uint32_t val)
+{
+ uint32_t addr = (uint32_t)priv->clk_base + reg;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
+ iowrite32(val, (void *)addr);
+}
+
+/*
+ * configure the PHY interface and clock routing and signal bits
+ */
+enum phy_intfc {
+ INTFC_MII = 0,
+ INTFC_RGMII = 1,
+ INTFC_REVMII = 2,
+};
+
+static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
+{
+ struct intf_config *p;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);
+
+ if (intfc > INTFC_REVMII) {
+ QFEC_LOG_ERR("%s: range\n", __func__);
+ return -ENXIO;
+ }
+
+ p = &intf_config_tbl[intfc];
+
+ qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
+ qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns);
+ qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns);
+ qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel);
+
+ return 0;
+}
+
+/*
+ * display registers thru proc-fs
+ */
+static struct qfec_clk_reg {
+ uint32_t offset;
+ char *label;
+} qfec_clk_regs[] = {
+ { ETH_MD_REG, "ETH_MD_REG" },
+ { ETH_NS_REG, "ETH_NS_REG" },
+ { ETH_X_EN_NS_REG, "ETH_X_EN_NS_REG" },
+ { EMAC_PTP_MD_REG, "EMAC_PTP_MD_REG" },
+ { EMAC_PTP_NS_REG, "EMAC_PTP_NS_REG" },
+ { EMAC_NS_REG, "EMAC_NS_REG" },
+ { EMAC_TX_FS_REG, "EMAC_TX_FS_REG" },
+ { EMAC_RX_FS_REG, "EMAC_RX_FS_REG" },
+ { EMAC_PHY_INTF_SEL_REG, "EMAC_PHY_INTF_SEL_REG" },
+ { EMAC_PHY_ADDR_REG, "EMAC_PHY_ADDR_REG" },
+ { EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" },
+ { EMAC_CLKMUX_SEL_REG, "EMAC_CLKMUX_SEL_REG" },
+};
+
+static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ struct qfec_clk_reg *p = qfec_clk_regs;
+ int n = ARRAY_SIZE(qfec_clk_regs);
+ int l = 0;
+ int count = PAGE_SIZE;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+ for (; n--; p++) {
+ l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n",
+ (void *)priv->clk_base + p->offset, p->offset,
+ qfec_clkreg_read(priv, p->offset), p->label);
+ }
+
+ return l;
+}
+
+/*
+ * speed selection
+ */
+
+struct qfec_pll_cfg {
+ uint32_t spd;
+ uint32_t eth_md; /* M [31:16], NOT 2*D [15:0] */
+ uint32_t eth_ns; /* NOT(M-N) [31:16], ctl bits [11:0] */
+};
+
+static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
+ /* 2.5 MHz */
+ { MAC_CONFIG_REG_SPD_10, ETH_MD_M(1) | ETH_MD_2D_N(100),
+ ETH_NS_NM(100-1)
+ | ETH_NS_MCNTR_EN
+ | ETH_NS_MCNTR_MODE_DUAL
+ | ETH_NS_PRE_DIV(0)
+ | CLK_SRC_PLL_EMAC },
+ /* 25 MHz */
+ { MAC_CONFIG_REG_SPD_100, ETH_MD_M(1) | ETH_MD_2D_N(10),
+ ETH_NS_NM(10-1)
+ | ETH_NS_MCNTR_EN
+ | ETH_NS_MCNTR_MODE_DUAL
+ | ETH_NS_PRE_DIV(0)
+ | CLK_SRC_PLL_EMAC },
+ /* 125 MHz */
+ {MAC_CONFIG_REG_SPD_1G, 0, ETH_NS_PRE_DIV(1)
+ | CLK_SRC_PLL_EMAC },
+};
+
+enum speed {
+ SPD_10 = 0,
+ SPD_100 = 1,
+ SPD_1000 = 2,
+};
+
+/*
+ * configure the PHY interface and clock routing and signal bits
+ */
+static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
+ unsigned int dplx)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct qfec_pll_cfg *p;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);
+
+ if (spd > SPD_1000) {
+ QFEC_LOG_ERR("%s: range\n", __func__);
+ return -ENODEV;
+ }
+
+ p = &qfec_pll_cfg_tbl[spd];
+
+ /* set the MAC speed bits */
+ qfec_reg_write(priv, MAC_CONFIG_REG,
+ (qfec_reg_read(priv, MAC_CONFIG_REG)
+ & ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
+ | p->spd | (dplx ? MAC_CONFIG_REG_DM : H_DPLX));
+
+ qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
+ qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);
+
+ return 0;
+}
+
+/*
+ * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
+ */
+
+static struct qfec_pll_cfg qfec_pll_ptp = {
+ /* 19.2 MHz tcxo */
+ 0, 0, ETH_NS_PRE_DIV(0)
+ | EMAC_PTP_NS_ROOT_EN
+ | EMAC_PTP_NS_CLK_EN
+ | CLK_SRC_TCXO
+};
+
+#define PLLTEST_PAD_CFG 0x01E0
+#define PLLTEST_PLL_7 0x3700
+
+#define CLKTEST_REG 0x01EC
+#define CLKTEST_EMAC_RX 0x3fc07f7a
+
+static int qfec_ptp_cfg(struct qfec_priv *priv)
+{
+ struct qfec_pll_cfg *p = &qfec_pll_ptp;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
+ __func__, p->eth_md, p->eth_ns);
+
+ qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
+ qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);
+
+ /* configure HS/LS clk test ports to verify clks */
+ qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX);
+ qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);
+
+ return 0;
+}
+
+/*
+ * MDIO operations
+ */
+
+/*
+ * wait reasonable amount of time for MDIO operation to complete, not busy
+ */
+static int qfec_mdio_busy(struct net_device *dev)
+{
+ int i;
+
+ for (i = 100; i > 0; i--) {
+ if (!(qfec_reg_read(
+ netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) {
+ return 0;
+ }
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+/*
+ * initiate either a read or write MDIO operation
+ */
+
+static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ int res = 0;
+
+ /* ensure the PHY is not busy */
+ res = qfec_mdio_busy(dev);
+ if (res) {
+ QFEC_LOG_ERR("%s: busy\n", __func__);
+ goto done;
+ }
+
+ /* initiate operation */
+ qfec_reg_write(priv, GMII_ADR_REG,
+ GMII_ADR_REG_ADR_SET(phy_id)
+ | GMII_ADR_REG_REG_SET(reg)
+ | GMII_ADR_REG_CSR_SET(priv->mdio_clk)
+ | (wr ? GMII_ADR_REG_GW : 0)
+ | GMII_ADR_REG_GB);
+
+ /* wait for operation to complete */
+ res = qfec_mdio_busy(dev);
+ if (res)
+ QFEC_LOG_ERR("%s: timeout\n", __func__);
+
+done:
+ return res;
+}
+
+/*
+ * read MDIO register
+ */
+static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ int res = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->mdio_lock, flags);
+
+ res = qfec_mdio_oper(dev, phy_id, reg, 0);
+ if (res) {
+ QFEC_LOG_ERR("%s: oper\n", __func__);
+ goto done;
+ }
+
+ res = qfec_reg_read(priv, GMII_DATA_REG);
+ QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
+ __func__, reg, res);
+
+done:
+ spin_unlock_irqrestore(&priv->mdio_lock, flags);
+ return res;
+}
+
+/*
+ * write MDIO register
+ */
+static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
+ int val)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->mdio_lock, flags);
+
+ QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
+ __func__, reg, val);
+
+ qfec_reg_write(priv, GMII_DATA_REG, val);
+
+ if (qfec_mdio_oper(dev, phy_id, reg, 1))
+ QFEC_LOG_ERR("%s: oper\n", __func__);
+
+ spin_unlock_irqrestore(&priv->mdio_lock, flags);
+}
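+
+/*
+ * Illustrative use of the accessors above (a sketch, not driver code):
+ * reading the standard PHY ID registers defined in linux/mii.h.
+ *
+ *	int id1 = qfec_mdio_read(dev, priv->phy_id, MII_PHYSID1);
+ *	int id2 = qfec_mdio_read(dev, priv->phy_id, MII_PHYSID2);
+ */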
+
+/*
+ * MDIO show
+ */
+static int qfec_mdio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int n;
+ int l = 0;
+ int count = PAGE_SIZE;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+ for (n = 0; n < MAX_MDIO_REG; n++) {
+ if (!(n % 8))
+ l += snprintf(&buf[l], count - l, "\n %02x: ", n);
+
+ l += snprintf(&buf[l], count - l, " %04x",
+ qfec_mdio_read(to_net_dev(dev), priv->phy_id, n));
+ }
+ l += snprintf(&buf[l], count - l, "\n");
+
+ return l;
+}
+
+/*
+ * get auto-negotiation results
+ */
+#define QFEC_100	(LPA_100HALF | LPA_100FULL | LPA_100BASE4)
+#define QFEC_100_FD (LPA_100FULL | LPA_100BASE4)
+#define QFEC_10 (LPA_10HALF | LPA_10FULL)
+#define QFEC_10_FD LPA_10FULL
+
+static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ uint32_t advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+ uint32_t lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
+ uint32_t mastCtrl = qfec_mdio_read(dev, priv->phy_id, MII_CTRL1000);
+ uint32_t mastStat = qfec_mdio_read(dev, priv->phy_id, MII_STAT1000);
+ uint32_t anExp = qfec_mdio_read(dev, priv->phy_id, MII_EXPANSION);
+ uint32_t status = advert & lpa;
+ uint32_t flow;
+
+ if (priv->mii.supports_gmii) {
+ if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK)
+ && (mastCtrl & ADVERTISE_1000FULL)
+ && (mastStat & LPA_1000FULL)) {
+ *spd = SPD_1000;
+ *dplx = F_DPLX;
+ goto pause;
+ }
+
+ else if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK)
+ && (mastCtrl & ADVERTISE_1000HALF)
+ && (mastStat & LPA_1000HALF)) {
+ *spd = SPD_1000;
+ *dplx = H_DPLX;
+ goto pause;
+ }
+ }
+
+ /* mii speeds */
+ if (status & QFEC_100) {
+ *spd = SPD_100;
+ *dplx = status & QFEC_100_FD ? F_DPLX : H_DPLX;
+ }
+
+ else if (status & QFEC_10) {
+ *spd = SPD_10;
+ *dplx = status & QFEC_10_FD ? F_DPLX : H_DPLX;
+ }
+
+ /* check pause */
+pause:
+ flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
+ flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);
+
+ if (status & ADVERTISE_PAUSE_CAP) {
+ flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
+ } else if (status & ADVERTISE_PAUSE_ASYM) {
+ if (lpa & ADVERTISE_PAUSE_CAP)
+ flow |= FLOW_CONTROL_TFE;
+ else if (advert & ADVERTISE_PAUSE_CAP)
+ flow |= FLOW_CONTROL_RFE;
+ }
+
+ qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
+}
+
+/*
+ * monitor phy status, and process auto-neg results when changed
+ */
+
+static void qfec_phy_monitor(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct qfec_priv *priv = netdev_priv(dev);
+	unsigned int spd = SPD_10;
+ unsigned int dplx = F_DPLX;
+
+ mod_timer(&priv->phy_tmr, jiffies + HZ);
+
+ if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) {
+ qfec_get_an(dev, &spd, &dplx);
+ qfec_speed_cfg(dev, spd, dplx);
+ QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
+ __func__, spd, dplx);
+
+ netif_carrier_on(dev);
+ }
+
+ else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) {
+ QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
+ netif_carrier_off(dev);
+ }
+}
+
+/*
+ * dealloc buffer descriptor memory
+ */
+
+static void qfec_mem_dealloc(struct net_device *dev)
+{
+	struct qfec_priv *priv = netdev_priv(dev);
+
+	dma_free_coherent(&dev->dev,
+		priv->bd_size, priv->bd_base, priv->tbd_dma);
+	priv->bd_base = NULL;
+
+	/* release the buf_desc bookkeeping arrays as well, so repeated
+	 * stop/open cycles (e.g. from set_ringparam) do not leak them
+	 */
+	kfree(priv->p_tbd);
+	priv->p_tbd = NULL;
+	kfree(priv->p_rbd);
+	priv->p_rbd = NULL;
+}
+
+/*
+ * allocate shared device memory for TX/RX buf-desc (and buffers)
+ */
+
+static int qfec_mem_alloc(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+ priv->bd_size =
+ (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
+
+ priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
+ if (!priv->p_tbd) {
+ QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
+ return -ENOMEM;
+ }
+
+ priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
+ if (!priv->p_rbd) {
+ QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
+		goto err_free_tbd;
+ }
+
+ /* alloc mem for buf-desc, if not already alloc'd */
+ if (!priv->bd_base) {
+ priv->bd_base = dma_alloc_coherent(&dev->dev,
+ priv->bd_size, &priv->tbd_dma,
+ GFP_KERNEL | __GFP_DMA);
+ }
+
+ if (!priv->bd_base) {
+ QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
+		goto err_free_rbd;
+ }
+
+ priv->rbd_dma = priv->tbd_dma
+ + (priv->n_tbd * sizeof(struct qfec_buf_desc));
+
+ QFEC_LOG(QFEC_LOG_DBG,
+ " %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
+ __func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
+
+	return 0;
+
+err_free_rbd:
+	kfree(priv->p_rbd);
+	priv->p_rbd = NULL;
+err_free_tbd:
+	kfree(priv->p_tbd);
+	priv->p_tbd = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * display buffer descriptors
+ */
+
+static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
+{
+ return snprintf(buf, size,
+ "%8p: %08x %08x %8p %8p %8p %8p %8p %x",
+ p_bd, qfec_bd_status_get(p_bd),
+ qfec_bd_ctl_get(p_bd), qfec_bd_pbuf_get(p_bd),
+ qfec_bd_next_get(p_bd), qfec_bd_skbuf_get(p_bd),
+ qfec_bd_virt_get(p_bd), qfec_bd_phys_get(p_bd),
+ qfec_bd_last_bd(p_bd));
+}
+
+static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
+ struct ring *p_ring, char *label)
+{
+ int l = 0;
+ int n;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
+
+ l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
+ if (!p_bd)
+ return l;
+
+ n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
+
+ for (n = 0; n < n_bd; n++, p_bd++) {
+ l += qfec_bd_fmt(&buf[l], count - l, p_bd);
+ l += snprintf(&buf[l], count - l, "%s%s\n",
+ (qfec_ring_head(p_ring) == n ? " < h" : ""),
+ (qfec_ring_tail(p_ring) == n ? " < t" : ""));
+ }
+
+ return l;
+}
+
+/*
+ * display TX BDs
+ */
+static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int count = PAGE_SIZE;
+
+ return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd,
+ &priv->ring_tbd, "TX");
+}
+
+/*
+ * display RX BDs
+ */
+static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int count = PAGE_SIZE;
+
+ return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd,
+ &priv->ring_rbd, "RX");
+}
+
+/*
+ * process timestamp values
+ * The pbuf and next fields of the buffer descriptors are overwritten
+ * with the timestamp high and low register values.
+ *
+ * The low register is incremented by the value in the subsec_increment
+ * register and overflows at 0x8000 0000 causing the high register to
+ * increment.
+ *
+ * The subsec_increment register is recommended to be set to the number
+ * of nanosec corresponding to each clock tick, scaled by 2^31 / 10^9
+ * (e.g. 40 * 2^31 / 10^9 = 85.9, or 86 for 25 MHz). However, the
+ * rounding error in this case will result in a 1 sec error / ~14 mins.
+ *
+ * An alternate approach is used instead: the subsec_increment is set
+ * to 1, and the concatenation of the two timestamp registers is used
+ * to count clock ticks. The 63-bit result is manipulated to determine
+ * the number
+ * of sec and ns.
+ */
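+
+/*
+ * Worked check of the rounding error cited above: at 25 MHz the ideal
+ * increment is 40 ns = 40 * 2^31 / 10^9 = 85.899 units, rounded to 86.
+ * The relative error, 86/85.899 - 1, is about 0.12%, i.e. roughly one
+ * second of drift every ~850 sec (~14 minutes).
+ */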
+
+/*
+ * convert 19.2 MHz clock ticks into sec/ns
+ */
+#define TS_LOW_REG_BITS 31
+
+#define MILLION 1000000UL
+#define BILLION 1000000000UL
+
+#define F_CLK 19200000UL
+#define F_CLK_PRE_SC 24
+#define F_CLK_INV_Q 56
+#define F_CLK_INV (((unsigned long long)1 << F_CLK_INV_Q) / F_CLK)
+#define F_CLK_TO_NS_Q 25
+#define F_CLK_TO_NS \
+ (((((unsigned long long)1<<F_CLK_TO_NS_Q)*BILLION)+(F_CLK-1))/F_CLK)
+#define US_TO_F_CLK_Q 20
+#define US_TO_F_CLK \
+ (((((unsigned long long)1<<US_TO_F_CLK_Q)*F_CLK)+(MILLION-1))/MILLION)
+
+static inline void qfec_get_sec(uint64_t *cnt,
+ uint32_t *sec, uint32_t *ns)
+{
+ unsigned long long t;
+ unsigned long long subsec;
+
+ t = *cnt >> F_CLK_PRE_SC;
+ t *= F_CLK_INV;
+ t >>= F_CLK_INV_Q - F_CLK_PRE_SC;
+ *sec = t;
+
+ t = *cnt - (t * F_CLK);
+ subsec = t;
+
+ if (subsec >= F_CLK) {
+ subsec -= F_CLK;
+ *sec += 1;
+ }
+
+ subsec *= F_CLK_TO_NS;
+ subsec >>= F_CLK_TO_NS_Q;
+ *ns = subsec;
+}
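+
+/*
+ * Example, hand-computed from the constants above: *cnt = 28,800,000
+ * ticks (1.5 sec at 19.2 MHz). The pre-scaled multiply truncates to
+ * sec = 0, the remainder 28,800,000 exceeds F_CLK so sec is corrected
+ * to 1, and the remaining 9,600,000 ticks scale to *ns = 500,000,000.
+ */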
+
+/*
+ * read ethernet timestamp registers, pass up raw register values
+ * and values converted to sec/ns
+ */
+static void qfec_read_timestamp(struct buf_desc *p_bd,
+ struct skb_shared_hwtstamps *ts)
+{
+ unsigned long long cnt;
+ unsigned int sec;
+ unsigned int subsec;
+
+ cnt = (unsigned long)qfec_bd_next_get(p_bd);
+ cnt <<= TS_LOW_REG_BITS;
+ cnt |= (unsigned long)qfec_bd_pbuf_get(p_bd);
+
+ /* report raw counts as concatenated 63 bits */
+ sec = cnt >> 32;
+ subsec = cnt & 0xffffffff;
+
+ ts->hwtstamp = ktime_set(sec, subsec);
+
+ /* translate counts to sec and ns */
+ qfec_get_sec(&cnt, &sec, &subsec);
+
+ ts->syststamp = ktime_set(sec, subsec);
+}
+
+/*
+ * capture the current system time in the timestamp registers
+ */
+static int qfec_cmd(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ struct timeval tv;
+
+ if (!strncmp(buf, "setTs", 5)) {
+ unsigned long long cnt;
+ uint32_t ts_hi;
+ uint32_t ts_lo;
+ unsigned long long subsec;
+
+ do_gettimeofday(&tv);
+
+ /* convert raw sec/usec to ns */
+ subsec = tv.tv_usec;
+ subsec *= US_TO_F_CLK;
+ subsec >>= US_TO_F_CLK_Q;
+
+ cnt = tv.tv_sec;
+ cnt *= F_CLK;
+ cnt += subsec;
+
+ ts_hi = cnt >> 31;
+ ts_lo = cnt & 0x7FFFFFFF;
+
+ qfec_reg_write(priv, TS_HI_UPDT_REG, ts_hi);
+ qfec_reg_write(priv, TS_LO_UPDT_REG, ts_lo);
+
+ qfec_reg_write(priv, TS_CTL_REG,
+ qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSINIT);
+ } else
+ pr_err("%s: unknown cmd, %s.\n", __func__, buf);
+
+ return strnlen(buf, count);
+}
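+
+/*
+ * The "cmd" attribute is registered in qfec_sysfs_create() below;
+ * assuming the interface comes up as eth0, the timestamp registers can
+ * be seeded from the system clock with:
+ *
+ *	echo setTs > /sys/class/net/eth0/cmd
+ */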
+
+/*
+ * display ethernet tstamp and system time
+ */
+static int qfec_tstamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int count = PAGE_SIZE;
+ int l;
+ struct timeval tv;
+ unsigned long long cnt;
+ uint32_t sec;
+ uint32_t ns;
+ uint32_t ts_hi;
+ uint32_t ts_lo;
+
+	/* ensure that ts_hi didn't increment during read */
+ do {
+ ts_hi = qfec_reg_read(priv, TS_HIGH_REG);
+ ts_lo = qfec_reg_read(priv, TS_LOW_REG);
+ } while (ts_hi != qfec_reg_read(priv, TS_HIGH_REG));
+
+ cnt = ts_hi;
+ cnt <<= TS_LOW_REG_BITS;
+ cnt |= ts_lo;
+
+ do_gettimeofday(&tv);
+
+ ts_hi = cnt >> 32;
+ ts_lo = cnt & 0xffffffff;
+
+ qfec_get_sec(&cnt, &sec, &ns);
+
+ l = snprintf(buf, count,
+ "%12u.%09u sec 0x%08x 0x%08x tstamp %12u.%06u time-of-day\n",
+ sec, ns, ts_hi, ts_lo, (int)tv.tv_sec, (int)tv.tv_usec);
+
+ return l;
+}
+
+/*
+ * free transmitted skbufs from buffer-descriptors not owned by HW
+ */
+static int qfec_tx_replenish(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct ring *p_ring = &priv->ring_tbd;
+ struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ CNTR_INC(priv, tx_replenish);
+
+ spin_lock_irqsave(&priv->xmit_lock, flags);
+
+ while (!qfec_ring_empty(p_ring)) {
+ if (qfec_bd_own(p_bd))
+ break; /* done for now */
+
+ skb = qfec_bd_skbuf_get(p_bd);
+ if (unlikely(skb == NULL)) {
+ QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
+ CNTR_INC(priv, tx_skb_null);
+ break;
+ }
+
+ qfec_reg_write(priv, STATUS_REG,
+ STATUS_REG_TU | STATUS_REG_TI);
+
+ /* retrieve timestamp if requested */
+ if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) {
+ CNTR_INC(priv, ts_tx_rtn);
+ qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
+ }
+
+ /* update statistics before freeing skb */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+
+ dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
+ skb->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ qfec_bd_skbuf_set(p_bd, NULL);
+
+ qfec_ring_tail_adv(p_ring);
+ p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)];
+ }
+
+ spin_unlock_irqrestore(&priv->xmit_lock, flags);
+
+ qfec_queue_start(dev);
+
+ return 0;
+}
+
+/*
+ * clear ownership bits of all TX buf-desc and release the sk-bufs
+ */
+static void qfec_tx_timeout(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct buf_desc *bd = priv->p_tbd;
+ int n;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+ CNTR_INC(priv, tx_timeout);
+
+ for (n = 0; n < priv->n_tbd; n++, bd++)
+ qfec_bd_own_clr(bd);
+
+ qfec_tx_replenish(dev);
+}
+
+/*
+ * rx() - process a received frame
+ */
+static void qfec_rx_int(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct ring *p_ring = &priv->ring_rbd;
+ struct buf_desc *p_bd = priv->p_latest_rbd;
+ uint32_t desc_status;
+ uint32_t mis_fr_reg;
+
+ desc_status = qfec_bd_status_get(p_bd);
+ mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);
+
+ CNTR_INC(priv, rx_int);
+
+ /* check that valid interrupt occurred */
+ if (unlikely(desc_status & BUF_OWN))
+ return;
+
+ /* accumulate missed-frame count (reg reset when read) */
+ priv->stats.rx_missed_errors += mis_fr_reg
+ & MIS_FR_REG_MISS_CNT;
+
+ /* process all unowned frames */
+ while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) {
+ struct sk_buff *skb;
+ struct buf_desc *p_bd_next;
+
+ skb = qfec_bd_skbuf_get(p_bd);
+
+ if (unlikely(skb == NULL)) {
+ QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
+ CNTR_INC(priv, rx_skb_null);
+ break;
+ }
+
+ /* cache coherency before skb->data is accessed */
+ dma_unmap_single(&dev->dev,
+ (dma_addr_t) qfec_bd_phys_get(p_bd),
+ ETH_BUF_SIZE, DMA_FROM_DEVICE);
+ prefetch(skb->data);
+
+ if (unlikely(desc_status & BUF_RX_ES)) {
+ priv->stats.rx_dropped++;
+ CNTR_INC(priv, rx_dropped);
+ dev_kfree_skb(skb);
+ } else {
+ qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
+
+ skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);
+
+ if (priv->state & timestamping) {
+ CNTR_INC(priv, ts_rec);
+ qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
+ }
+
+ /* update statistics before freeing skb */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += skb->len;
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (NET_RX_DROP == netif_rx(skb)) {
+ priv->stats.rx_dropped++;
+ CNTR_INC(priv, rx_dropped);
+ }
+ CNTR_INC(priv, netif_rx_cntr);
+ }
+
+ if (p_bd != priv->p_ending_rbd)
+ p_bd_next = p_bd + 1;
+ else
+ p_bd_next = priv->p_rbd;
+ desc_status = qfec_bd_status_get(p_bd_next);
+
+ qfec_bd_skbuf_set(p_bd, NULL);
+
+ qfec_ring_head_adv(p_ring);
+ p_bd = p_bd_next;
+ }
+
+ priv->p_latest_rbd = p_bd;
+
+ /* replenish bufs */
+ while (!qfec_ring_empty(p_ring)) {
+ if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
+ break;
+ qfec_ring_tail_adv(p_ring);
+ }
+
+ qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
+}
+
+/*
+ * isr() - interrupt service routine
+ * determine cause of interrupt and invoke/schedule appropriate
+ * processing or error handling
+ */
+#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
+ if (status & interrupt) \
+ CNTR_INC(priv, cntr)
+
+static irqreturn_t qfec_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct qfec_priv *priv = netdev_priv(dev);
+ uint32_t status = qfec_reg_read(priv, STATUS_REG);
+ uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);
+
+ /* abnormal interrupt */
+ if (status & STATUS_REG_AIS) {
+ QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
+ __func__, status);
+
+ ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail);
+ ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);
+
+ ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
+ ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
+ ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);
+
+ ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
+ ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
+ ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);
+
+ int_bits |= STATUS_REG_AIS_BITS;
+ CNTR_INC(priv, abnorm_int);
+ }
+
+ if (status & STATUS_REG_NIS)
+ CNTR_INC(priv, norm_int);
+
+ /* receive interrupt */
+ if (status & STATUS_REG_RI) {
+ CNTR_INC(priv, rx_isr);
+ qfec_rx_int(dev);
+ }
+
+ /* transmit interrupt */
+ if (status & STATUS_REG_TI) {
+ CNTR_INC(priv, tx_isr);
+ qfec_tx_replenish(dev);
+ }
+
+ /* gmac interrupt */
+ if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) {
+ status &= ~(STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI);
+ CNTR_INC(priv, gmac_isr);
+ int_bits |= STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI;
+ qfec_reg_read(priv, SG_RG_SMII_STATUS_REG);
+ }
+
+ /* clear interrupts */
+ qfec_reg_write(priv, STATUS_REG, int_bits);
+ CNTR_INC(priv, isr);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * open () - register system resources (IRQ, DMA, ...)
+ * turn on HW, perform device setup.
+ */
+static int qfec_open(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct buf_desc *p_bd;
+ struct ring *p_ring;
+ struct qfec_buf_desc *p_desc;
+ int n;
+ int res = 0;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+ if (!dev) {
+ res = -EINVAL;
+ goto err;
+ }
+
+ /* allocate TX/RX buffer-descriptors and buffers */
+
+ res = qfec_mem_alloc(dev);
+ if (res)
+ goto err;
+
+ /* initialize TX */
+ p_desc = priv->bd_base;
+
+ for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
+ p_bd->p_desc = p_desc++;
+
+ if (n == (priv->n_tbd - 1))
+ qfec_bd_last_bd_set(p_bd);
+
+ qfec_bd_own_clr(p_bd); /* clear ownership */
+ }
+
+ qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);
+
+ priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
+ if (priv->tx_ic_mod == 0)
+ priv->tx_ic_mod = 1;
+
+ /* initialize RX buffer descriptors and allocate sk_bufs */
+ p_ring = &priv->ring_rbd;
+ qfec_ring_init(p_ring, priv->n_rbd, 0);
+ qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);
+
+ for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
+ p_bd->p_desc = p_desc++;
+
+ if (qfec_rbd_init(dev, p_bd))
+ break;
+ qfec_ring_tail_adv(p_ring);
+ }
+
+ priv->p_latest_rbd = priv->p_rbd;
+ priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;
+
+ /* config ptp clock */
+ qfec_ptp_cfg(priv);
+
+ /* configure PHY - must be set before reset/hw_init */
+ priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);
+ if (priv->mii.supports_gmii) {
+ QFEC_LOG_ERR("%s: RGMII\n", __func__);
+ qfec_intf_sel(priv, INTFC_RGMII);
+ } else {
+ QFEC_LOG_ERR("%s: MII\n", __func__);
+ qfec_intf_sel(priv, INTFC_MII);
+ }
+
+ /* initialize controller after BDs allocated */
+ res = qfec_hw_init(priv);
+ if (res)
+ goto err1;
+
+ /* get/set (primary) MAC address */
+ qfec_set_adr_regs(priv, dev->dev_addr);
+ qfec_set_rx_mode(dev);
+
+ /* start phy monitor */
+ QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
+ netif_carrier_off(priv->net_dev);
+ setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
+ mod_timer(&priv->phy_tmr, jiffies + HZ);
+
+ /* driver supports AN capable PHY only */
+ qfec_mdio_write(dev, priv->phy_id, MII_BMCR, BMCR_RESET);
+ res = (BMCR_ANENABLE|BMCR_ANRESTART);
+ qfec_mdio_write(dev, priv->phy_id, MII_BMCR, res);
+
+ /* initialize interrupts */
+ QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
+ res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
+ if (res)
+ goto err1;
+
+ /* enable controller */
+ qfec_hw_enable(priv);
+ netif_start_queue(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
+ mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));
+
+ QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
+ return 0;
+
+err1:
+ qfec_mem_dealloc(dev);
+err:
+ QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
+ return res;
+}
+
+/*
+ * stop() - reverse the operations performed at open time
+ */
+static int qfec_stop(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct buf_desc *p_bd;
+ struct sk_buff *skb;
+ int n;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ del_timer_sync(&priv->phy_tmr);
+
+ qfec_hw_disable(priv);
+ qfec_queue_stop(dev);
+ free_irq(dev->irq, dev);
+
+ /* free all pending sk_bufs */
+ for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
+ skb = qfec_bd_skbuf_get(p_bd);
+ if (skb)
+ dev_kfree_skb(skb);
+ }
+
+ for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
+ skb = qfec_bd_skbuf_get(p_bd);
+ if (skb)
+ dev_kfree_skb(skb);
+ }
+
+ qfec_mem_dealloc(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
+
+ return 0;
+}
+
+static int qfec_set_config(struct net_device *dev, struct ifmap *map)
+{
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+ return 0;
+}
+
+/*
+ * pass data from skbuf to buf-desc
+ */
+static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct ring *p_ring = &priv->ring_tbd;
+ struct buf_desc *p_bd;
+ uint32_t ctrl = 0;
+ int ret = NETDEV_TX_OK;
+ unsigned long flags;
+
+ CNTR_INC(priv, xmit);
+
+ spin_lock_irqsave(&priv->xmit_lock, flags);
+
+	/* If there is no room on the ring, try to free some up */
+ if (qfec_ring_room(p_ring) == 0)
+ qfec_tx_replenish(dev);
+
+ /* stop queuing if no resources available */
+ if (qfec_ring_room(p_ring) == 0) {
+ qfec_queue_stop(dev);
+ CNTR_INC(priv, tx_no_resource);
+
+ ret = NETDEV_TX_BUSY;
+ goto done;
+ }
+
+ /* locate and save *sk_buff */
+ p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
+ qfec_bd_skbuf_set(p_bd, skb);
+
+ /* set DMA ptr to sk_buff data and write cache to memory */
+ qfec_bd_pbuf_set(p_bd, (void *)
+ dma_map_single(&dev->dev,
+ (void *)skb->data, skb->len, DMA_TO_DEVICE));
+
+ ctrl = skb->len;
+ if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
+ ctrl |= BUF_TX_IC; /* interrupt on complete */
+
+ /* check if timestamping enabled and requested */
+ if (priv->state & timestamping) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ CNTR_INC(priv, ts_tx_en);
+ ctrl |= BUF_TX_IC; /* interrupt on complete */
+ ctrl |= BUF_TX_TTSE; /* enable timestamp */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ }
+
+ if (qfec_bd_last_bd(p_bd))
+ ctrl |= BUF_RX_RER;
+
+ /* no gather, no multi buf frames */
+ ctrl |= BUF_TX_FS | BUF_TX_LS; /* 1st and last segment */
+
+ qfec_bd_ctl_wr(p_bd, ctrl);
+ qfec_bd_status_set(p_bd, BUF_OWN);
+
+ qfec_ring_head_adv(p_ring);
+ qfec_reg_write(priv, TX_POLL_DEM_REG, 1); /* poll */
+
+done:
+ spin_unlock_irqrestore(&priv->xmit_lock, flags);
+
+ return ret;
+}
+
+static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct qfec_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config cfg;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	if (cmd == SIOCSHWTSTAMP) {
+		CNTR_INC(priv, ts_ioctl);
+
+		/* the config struct is passed by reference in ifr_data,
+		 * not embedded in the ifreq itself
+		 */
+		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+			return -EFAULT;
+
+		QFEC_LOG(QFEC_LOG_DBG,
+			"%s: SIOCSHWTSTAMP - %x flags %x tx %x rx\n",
+			__func__, cfg.flags, cfg.tx_type, cfg.rx_filter);
+
+		cfg.flags = 0;
+		cfg.tx_type = HWTSTAMP_TX_ON;
+		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+
+		priv->state |= timestamping;
+		qfec_reg_write(priv, TS_CTL_REG,
+			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);
+
+		if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
+}
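+
+/*
+ * Illustrative userspace sketch of the SIOCSHWTSTAMP path above (the
+ * interface name and socket are assumptions, not driver code):
+ *
+ *	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
+ *				       .rx_filter = HWTSTAMP_FILTER_ALL };
+ *	struct ifreq ifr;
+ *
+ *	strlcpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ */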
+
+static struct net_device_stats *qfec_get_stats(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
+
+ priv->stats.multicast = qfec_reg_read(priv, NUM_MULTCST_FRM_RCVD_G);
+
+ return &priv->stats;
+}
+
+/*
+ * accept new mac address
+ */
+static int qfec_set_mac_address(struct net_device *dev, void *p)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ qfec_set_adr_regs(priv, dev->dev_addr);
+
+ return 0;
+}
+
+/*
+ * read discontinuous MAC address from corrected fuse memory region
+ */
+
+static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
+{
+ static int offset[] = { 0, 1, 2, 3, 4, 8 };
+ int n;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ for (n = 0; n < nBytes; n++)
+ buf[n] = ioread8(mac_base + offset[n]);
+
+ /* check that MAC programmed */
+ if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) {
+ QFEC_LOG_ERR("%s: null MAC address\n", __func__);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+/*
+ * static definition of driver functions
+ */
+static const struct net_device_ops qfec_netdev_ops = {
+ .ndo_open = qfec_open,
+ .ndo_stop = qfec_stop,
+ .ndo_start_xmit = qfec_xmit,
+
+ .ndo_do_ioctl = qfec_do_ioctl,
+ .ndo_tx_timeout = qfec_tx_timeout,
+ .ndo_set_mac_address = qfec_set_mac_address,
+ .ndo_set_rx_mode = qfec_set_rx_mode,
+
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+
+ .ndo_get_stats = qfec_get_stats,
+ .ndo_set_config = qfec_set_config,
+};
+
+/*
+ * ethtool functions
+ */
+
+static int qfec_nway_reset(struct net_device *dev)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ return mii_nway_restart(&priv->mii);
+}
+
+/*
+ * speed, duplex, auto-neg settings
+ */
+static void qfec_ethtool_getpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pp)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
+ u32 advert;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ /* report current settings */
+ pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0;
+ pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0;
+
+ /* report if pause is being advertised */
+ advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+ pp->autoneg =
+ (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
+}
+
+static int qfec_ethtool_setpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pp)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ u32 advert;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
+ pp->autoneg, pp->rx_pause, pp->tx_pause);
+
+ advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+ advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+ /* If pause autonegotiation is enabled, but both rx and tx are not
+ * because neither was specified in the ethtool cmd,
+	 * enable both symmetrical and asymmetrical pause.
+	 * Otherwise, only enable the pause mode indicated by rx/tx.
+ */
+ if (pp->autoneg) {
+ if (pp->rx_pause)
+ advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
+ else if (pp->tx_pause)
+ advert |= ADVERTISE_PAUSE_ASYM;
+ else
+ advert |= ADVERTISE_PAUSE_CAP;
+ }
+
+ qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);
+
+ return 0;
+}
+
+/*
+ * ethtool ring parameter (-g/G) support
+ */
+
+/*
+ * setringparam - change the tx/rx ring lengths
+ */
+#define MIN_RING_SIZE 3
+#define MAX_RING_SIZE 1000
+static int qfec_ethtool_setringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ u32 timeout = 20;
+
+ /* notify stack the link is down */
+ netif_carrier_off(dev);
+
+ /* allow tx to complete & free skbufs on the tx ring */
+ do {
+ usleep_range(10000, 100000);
+ qfec_tx_replenish(dev);
+
+ if (timeout-- == 0) {
+ QFEC_LOG_ERR("%s: timeout\n", __func__);
+ return -ETIME;
+ }
+ } while (!qfec_ring_empty(&priv->ring_tbd));
+
+
+ qfec_stop(dev);
+
+ /* set tx ring size */
+ if (ring->tx_pending < MIN_RING_SIZE)
+ ring->tx_pending = MIN_RING_SIZE;
+ else if (ring->tx_pending > MAX_RING_SIZE)
+ ring->tx_pending = MAX_RING_SIZE;
+ priv->n_tbd = ring->tx_pending;
+
+ /* set rx ring size */
+ if (ring->rx_pending < MIN_RING_SIZE)
+ ring->rx_pending = MIN_RING_SIZE;
+ else if (ring->rx_pending > MAX_RING_SIZE)
+ ring->rx_pending = MAX_RING_SIZE;
+ priv->n_rbd = ring->rx_pending;
+
+
+ qfec_open(dev);
+
+ return 0;
+}
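+
+/*
+ * Example (assuming the interface is eth0): the ring lengths are driven
+ * from userspace through the standard ethtool -g/-G interface, e.g.
+ *
+ *	ethtool -G eth0 tx 100 rx 100
+ */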
+
+/*
+ * getringparam - return the local ring values
+ */
+static void qfec_ethtool_getringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ ring->rx_max_pending = MAX_RING_SIZE;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = MAX_RING_SIZE;
+
+ ring->rx_pending = priv->n_rbd;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = priv->n_tbd;
+}
+
+/*
+ * speed, duplex, auto-neg settings
+ */
+static int
+qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ cmd->maxrxpkt = priv->n_rbd;
+ cmd->maxtxpkt = priv->n_tbd;
+
+ return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+static int
+qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+/*
+ * msg/debug level
+ */
+static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
+{
+ return qfec_debug;
+}
+
+static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ qfec_debug ^= level; /* toggle on/off */
+}
+
+/*
+ * register dump
+ */
+#define DMA_DMP_OFFSET 0x0000
+#define DMA_REG_OFFSET 0x1000
+#define DMA_REG_LEN 23
+
+#define MAC_DMP_OFFSET 0x0080
+#define MAC_REG_OFFSET 0x0000
+#define MAC_REG_LEN 55
+
+#define TS_DMP_OFFSET 0x0180
+#define TS_REG_OFFSET 0x0700
+#define TS_REG_LEN 15
+
+#define MDIO_DMP_OFFSET 0x0200
+#define MDIO_REG_LEN 16
+
+#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
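+
+/* REG_SIZE works out to 0x200 bytes of 32-bit register dumps plus
+ * 16 16-bit MDIO values: 512 + 32 = 544 bytes
+ */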
+
+static int qfec_ethtool_getregs_len(struct net_device *dev)
+{
+ return REG_SIZE;
+}
+
+static void
+qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ u32 *data = buf;
+ u16 *data16;
+ unsigned int i;
+ unsigned int j;
+ unsigned int n;
+
+ memset(buf, 0, REG_SIZE);
+
+ j = DMA_DMP_OFFSET / sizeof(u32);
+ for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32))
+ data[j++] = htonl(qfec_reg_read(priv, i));
+
+ j = MAC_DMP_OFFSET / sizeof(u32);
+ for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32))
+ data[j++] = htonl(qfec_reg_read(priv, i));
+
+ j = TS_DMP_OFFSET / sizeof(u32);
+ for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32))
+ data[j++] = htonl(qfec_reg_read(priv, i));
+
+ data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
+ for (i = 0, n = 0; i < MDIO_REG_LEN; i++)
+ data16[n++] = htons(qfec_mdio_read(dev, 0, i));
+
+ regs->len = REG_SIZE;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
+}
+
+/*
+ * statistics
+ * return counts of various ethernet activity.
+ * many of these are same as in struct net_device_stats
+ *
+ * missed-frames indicates the number of attempts made by the ethernet
+ * controller to write to a buffer-descriptor when the BD ownership
+ * bit was not set. The rxfifooverflow counter (0x1D4) is not
+ * available. The Missed Frame and Buffer Overflow Counter register
+ * (0x1020) is used instead, but is only 16 bits wide and is reset
+ * when read; qfec_rx_int() reads it and accumulates the count into
+ * priv->stats.rx_missed_errors.
+ */
+static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
+ "TX good/bad Bytes ",
+ "TX Bytes ",
+ "TX good/bad Frames ",
+ "TX Bcast Frames ",
+ "TX Mcast Frames ",
+ "TX Unicast Frames ",
+ "TX Pause Frames ",
+ "TX Vlan Frames ",
+ "TX Frames 64 ",
+ "TX Frames 65-127 ",
+ "TX Frames 128-255 ",
+ "TX Frames 256-511 ",
+ "TX Frames 512-1023 ",
+ "TX Frames 1024+ ",
+ "TX Pause Frames ",
+ "TX Collisions ",
+ "TX Late Collisions ",
+ "TX Excessive Collisions ",
+
+ "RX good/bad Bytes ",
+ "RX Bytes ",
+ "RX good/bad Frames ",
+ "RX Bcast Frames ",
+ "RX Mcast Frames ",
+ "RX Unicast Frames ",
+ "RX Pause Frames ",
+ "RX Vlan Frames ",
+ "RX Frames 64 ",
+ "RX Frames 65-127 ",
+ "RX Frames 128-255 ",
+ "RX Frames 256-511 ",
+ "RX Frames 512-1023 ",
+ "RX Frames 1024+ ",
+ "RX Pause Frames ",
+ "RX Crc error Frames ",
+ "RX Length error Frames ",
+ "RX Alignment error Frames ",
+ "RX Runt Frames ",
+ "RX Oversize Frames ",
+ "RX Missed Frames ",
+};
+
+static u32 qfec_stats_regs[] = {
+
+ 69, 89, 70, 71, 72, 90, 92, 93,
+ 73, 74, 75, 76, 77, 78, 92, 84,
+ 86, 87,
+
+ 97, 98, 96, 99, 100, 113, 116, 118,
+ 107, 108, 109, 110, 111, 112, 116, 101,
+ 114, 102, 103, 106
+};
+
+static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct qfec_priv *priv = netdev_priv(to_net_dev(dev));
+ int count = PAGE_SIZE;
+ int l = 0;
+ int n;
+
+ QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+ for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) {
+ l += snprintf(&buf[l], count - l, " %12u %s\n",
+ qfec_reg_read(priv,
+ qfec_stats_regs[n] * sizeof(uint32_t)),
+ qfec_stats_strings[n]);
+ }
+
+ return l;
+}
+
+static int qfec_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
+ u8 *buf)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %zu bytes\n", __func__,
+		sizeof(qfec_stats_strings));
+
+ memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
+}
+
+static void qfec_ethtool_getstats(struct net_device *dev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct qfec_priv *priv = netdev_priv(dev);
+ int j = 0;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
+ data[j++] = qfec_reg_read(priv,
+ qfec_stats_regs[n] * sizeof(uint32_t));
+
+ data[j++] = priv->stats.rx_missed_errors;
+
+ stats->n_stats = j;
+}
+
+static void qfec_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, QFEC_NAME, sizeof(info->driver));
+ strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+
+ info->eedump_len = 0;
+ info->regdump_len = qfec_ethtool_getregs_len(dev);
+}
+
+/*
+ * ethtool ops table
+ */
+static const struct ethtool_ops qfec_ethtool_ops = {
+ .nway_reset = qfec_nway_reset,
+
+ .get_settings = qfec_ethtool_getsettings,
+ .set_settings = qfec_ethtool_setsettings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = qfec_ethtool_getdrvinfo,
+ .get_msglevel = qfec_ethtool_getmsglevel,
+ .set_msglevel = qfec_ethtool_setmsglevel,
+ .get_regs_len = qfec_ethtool_getregs_len,
+ .get_regs = qfec_ethtool_getregs,
+
+ .get_ringparam = qfec_ethtool_getringparam,
+ .set_ringparam = qfec_ethtool_setringparam,
+
+ .get_pauseparam = qfec_ethtool_getpauseparam,
+ .set_pauseparam = qfec_ethtool_setpauseparam,
+
+ .get_sset_count = qfec_get_sset_count,
+ .get_strings = qfec_ethtool_getstrings,
+ .get_ethtool_stats = qfec_ethtool_getstats,
+};
+
+/*
+ * create sysfs entries
+ */
+static DEVICE_ATTR(bd_tx, 0444, qfec_bd_tx_show, NULL);
+static DEVICE_ATTR(bd_rx, 0444, qfec_bd_rx_show, NULL);
+static DEVICE_ATTR(cfg, 0444, qfec_config_show, NULL);
+static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
+static DEVICE_ATTR(cmd, 0222, NULL, qfec_cmd);
+static DEVICE_ATTR(cntrs, 0444, qfec_cntrs_show, NULL);
+static DEVICE_ATTR(reg, 0444, qfec_reg_show, NULL);
+static DEVICE_ATTR(mdio, 0444, qfec_mdio_show, NULL);
+static DEVICE_ATTR(stats, 0444, qfec_stats_show, NULL);
+static DEVICE_ATTR(tstamp, 0444, qfec_tstamp_show, NULL);
+
+static void qfec_sysfs_create(struct net_device *dev)
+{
+ if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
+ device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
+ device_create_file(&(dev->dev), &dev_attr_cfg) ||
+ device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
+ device_create_file(&(dev->dev), &dev_attr_cmd) ||
+ device_create_file(&(dev->dev), &dev_attr_cntrs) ||
+ device_create_file(&(dev->dev), &dev_attr_mdio) ||
+ device_create_file(&(dev->dev), &dev_attr_reg) ||
+ device_create_file(&(dev->dev), &dev_attr_stats) ||
+ device_create_file(&(dev->dev), &dev_attr_tstamp))
+ pr_err("qfec_sysfs_create failed to create sysfs files\n");
+}
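+
+/*
+ * The read-only attributes above appear in the netdev's sysfs
+ * directory; assuming the interface is eth0, for example:
+ *
+ *	cat /sys/class/net/eth0/bd_tx
+ *	cat /sys/class/net/eth0/stats
+ */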
+
+/*
+ * map a specified resource
+ */
+static int qfec_map_resource(struct platform_device *plat, int resource,
+ struct resource **priv_res,
+ void **addr)
+{
+ struct resource *res;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
+
+ /* allocate region to access controller registers */
+ *priv_res = res = platform_get_resource(plat, resource, 0);
+ if (!res) {
+ QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
+ return -ENODEV;
+ }
+
+	res = request_mem_region(res->start, res->end - res->start, QFEC_NAME);
+	if (!res) {
+		/* res is NULL here; report from the saved resource */
+		QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
+			__func__, (*priv_res)->start,
+			(*priv_res)->end - (*priv_res)->start);
+		return -EBUSY;
+	}
+
+ *addr = ioremap(res->start, res->end - res->start);
+ if (!*addr)
+ return -ENOMEM;
+
+ QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
+ __func__, (void *)res->start, *addr);
+
+ return 0;
+}
+
+/*
+ * free allocated io regions
+ */
+static void qfec_free_res(struct resource *res, void *base)
+{
+	if (res) {
+		if (base)
+			iounmap((void __iomem *)base);
+
+		release_mem_region(res->start, res->end - res->start);
+	}
+}
+
+/*
+ * probe function that obtains configuration info and allocates a net_device
+ */
+static int __devinit qfec_probe(struct platform_device *plat)
+{
+ struct net_device *dev;
+ struct qfec_priv *priv;
+ int ret = 0;
+
+ /* allocate device */
+ dev = alloc_etherdev(sizeof(struct qfec_priv));
+ if (!dev) {
+ QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+ qfec_dev = dev;
+ SET_NETDEV_DEV(dev, &plat->dev);
+
+ dev->netdev_ops = &qfec_netdev_ops;
+ dev->ethtool_ops = &qfec_ethtool_ops;
+ dev->watchdog_timeo = 2 * HZ;
+ dev->irq = platform_get_irq(plat, 0);
+
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ /* initialize private data */
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof(*priv));
+
+ priv->net_dev = dev;
+ platform_set_drvdata(plat, dev);
+
+ priv->n_tbd = TX_BD_NUM;
+ priv->n_rbd = RX_BD_NUM;
+
+ /* initialize phy structure */
+ priv->mii.phy_id_mask = 0x1F;
+ priv->mii.reg_num_mask = 0x1F;
+ priv->mii.dev = dev;
+ priv->mii.mdio_read = qfec_mdio_read;
+ priv->mii.mdio_write = qfec_mdio_write;
+
+ /* map register regions */
+ ret = qfec_map_resource(
+ plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
+ if (ret) {
+ QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
+ goto err1;
+ }
+
+ ret = qfec_map_resource(
+ plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
+ if (ret) {
+ QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
+ goto err2;
+ }
+
+ ret = qfec_map_resource(
+ plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
+ if (ret) {
+ QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
+ goto err3;
+ }
+
+ /* initialize MAC addr */
+ ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
+ MAC_ADDR_SIZE);
+ if (ret)
+ goto err4;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+ __func__,
+ dev->dev_addr[0], dev->dev_addr[1],
+ dev->dev_addr[2], dev->dev_addr[3],
+ dev->dev_addr[4], dev->dev_addr[5]);
+
+	/* initialize locks before the netdev becomes reachable */
+	spin_lock_init(&priv->mdio_lock);
+	spin_lock_init(&priv->xmit_lock);
+
+	ret = register_netdev(dev);
+	if (ret) {
+		QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
+		goto err4;
+	}
+
+	qfec_sysfs_create(dev);
+
+ return 0;
+
+ /* error handling */
+err4:
+ qfec_free_res(priv->fuse_res, priv->fuse_base);
+err3:
+ qfec_free_res(priv->clk_res, priv->clk_base);
+err2:
+ qfec_free_res(priv->mac_res, priv->mac_base);
+err1:
+ free_netdev(dev);
+err:
+ QFEC_LOG_ERR("%s: err\n", __func__);
+ return ret;
+}
+
+/*
+ * module remove
+ */
+static int __devexit qfec_remove(struct platform_device *plat)
+{
+ struct net_device *dev = platform_get_drvdata(plat);
+ struct qfec_priv *priv = netdev_priv(dev);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	platform_set_drvdata(plat, NULL);
+
+	/* unregister before releasing the resources the netdev uses */
+	unregister_netdev(dev);
+
+	qfec_free_res(priv->fuse_res, priv->fuse_base);
+	qfec_free_res(priv->clk_res, priv->clk_base);
+	qfec_free_res(priv->mac_res, priv->mac_base);
+
+	free_netdev(dev);
+
+ return 0;
+}
+
+/*
+ * module support
+ * the FSM9xxx is not a mobile device and does not support power management
+ */
+
+static struct platform_driver qfec_driver = {
+ .probe = qfec_probe,
+ .remove = __devexit_p(qfec_remove),
+ .driver = {
+ .name = QFEC_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * module init
+ */
+static int __init qfec_init_module(void)
+{
+ int res;
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
+
+ res = platform_driver_register(&qfec_driver);
+
+ QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
+ __func__, res);
+
+ return res;
+}
+
+/*
+ * module exit
+ */
+static void __exit qfec_exit_module(void)
+{
+ QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+ platform_driver_unregister(&qfec_driver);
+}
+
+MODULE_DESCRIPTION("FSM Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_VERSION("1.0");
+
+module_init(qfec_init_module);
+module_exit(qfec_exit_module);
diff --git a/drivers/net/ethernet/msm/qfec.h b/drivers/net/ethernet/msm/qfec.h
new file mode 100644
index 0000000..310406a
--- /dev/null
+++ b/drivers/net/ethernet/msm/qfec.h
@@ -0,0 +1,800 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Qualcomm Fast Ethernet Controller (QFEC) HW description */
+
+#ifndef _QFEC_EMAC_H_
+# define _QFEC_EMAC_H_
+
+# ifndef __KERNEL__
+#  include <stdint.h>
+# endif
+
+# define MskBits(nBits, pos) (((1 << nBits)-1)<<pos)
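+/* e.g. MskBits(4, 3) == 0x78; see BUF_TX_CC below */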
+
+/* Rx/Tx Ethernet Buffer Descriptors
+ * status contains the ownership, status and receive length bits
+ * ctl contains control and size bits for two buffers
+ * p_buf contains a ptr to the data buffer
+ * MAC writes timestamp low into p_buf
+ * next contains either ptr to 2nd buffer or next buffer-desc
+ * MAC writes timestamp high into next
+ *
+ * status/ctl bit definitions depend on RX or TX usage
+ */
+
+
+struct qfec_buf_desc {
+ uint32_t status;
+ uint32_t ctl;
+ void *p_buf;
+ void *next;
+};
+
+/* ownership bit operations */
+# define BUF_OWN 0x80000000 /* DMA owns buffer */
+# define BUF_OWN_DMA BUF_OWN
+
+/* RX buffer status bits */
+# define BUF_RX_AFM 0x40000000 /* dest addr filt fail */
+
+# define BUF_RX_FL 0x3fff0000 /* frame length */
+# define BUF_RX_FL_GET(p) ((p.status & BUF_RX_FL) >> 16)
+# define BUF_RX_FL_SET(p, x) \
+ (p.status = (p.status & ~BUF_RX_FL) | ((x << 16) & BUF_RX_FL))
+# define BUF_RX_FL_GET_FROM_STATUS(status) \
+ (((status) & BUF_RX_FL) >> 16)
+
+# define BUF_RX_ES 0x00008000 /* error summary */
+# define BUF_RX_DE 0x00004000 /* error descriptor (es) */
+# define BUF_RX_SAF 0x00002000 /* source addr filt fail */
+# define BUF_RX_LE 0x00001000 /* length error */
+
+# define BUF_RX_OE 0x00000800 /* overflow error (es) */
+# define BUF_RX_VLAN 0x00000400 /* vlan tag */
+# define BUF_RX_FS 0x00000200 /* first descriptor */
+# define BUF_RX_LS 0x00000100 /* last descriptor */
+
+# define BUF_RX_IPC 0x00000080 /* cksum-err/giant-frame (es) */
+# define BUF_RX_LC 0x00000040 /* late collision (es) */
+# define BUF_RX_FT 0x00000020 /* frame type */
+# define BUF_RX_RWT 0x00000010 /* rec watchdog timeout (es) */
+
+# define BUF_RX_RE 0x00000008 /* rec error (es) */
+# define BUF_RX_DBE 0x00000004 /* dribble bit err */
+# define BUF_RX_CE 0x00000002 /* crc err (es) */
+# define BUF_RX_CSE 0x00000001 /* checksum err */
+
+# define BUF_RX_ERRORS \
+ (BUF_RX_DE | BUF_RX_SAF | BUF_RX_LE | BUF_RX_OE \
+ | BUF_RX_IPC | BUF_RX_LC | BUF_RX_RWT | BUF_RX_RE \
+ | BUF_RX_DBE | BUF_RX_CE | BUF_RX_CSE)
+
+/* RX buffer control bits */
+# define BUF_RX_DI 0x80000000 /* disable intrp on compl */
+# define BUF_RX_RER 0x02000000 /* rec end of ring */
+# define BUF_RX_RCH 0x01000000 /* 2nd addr chained */
+
+# define BUF_RX_SIZ2 0x003ff800 /* buffer 2 size */
+# define BUF_RX_SIZ2_GET(p)	((p.ctl & BUF_RX_SIZ2) >> 11)
+
+# define BUF_RX_SIZ 0x000007ff /* rx buf 1 size */
+# define BUF_RX_SIZ_GET(p) (p.ctl&BUF_RX_SIZ)
+
+/* TX buffer status bits */
+# define BUF_TX_TTSS 0x00020000 /* time stamp status */
+# define BUF_TX_IHE 0x00010000 /* IP hdr err */
+
+# define BUF_TX_ES 0x00008000 /* error summary */
+# define BUF_TX_JT 0x00004000 /* jabber timeout (es) */
+# define BUF_TX_FF 0x00002000 /* frame flushed (es) */
+# define BUF_TX_PCE 0x00001000 /* payld cksum err */
+
+# define BUF_TX_LOC 0x00000800 /* loss carrier (es) */
+# define BUF_TX_NC 0x00000400 /* no carrier (es) */
+# define BUF_TX_LC 0x00000200 /* late collision (es) */
+# define BUF_TX_EC 0x00000100 /* excessive collision (es) */
+
+# define BUF_TX_VLAN 0x00000080 /* VLAN frame */
+# define BUF_TX_CC MskBits(4, 3) /* collision count */
+# define BUF_TX_CC_GET(p) ((p.status&BUF_TX_CC)>>3)
+
+# define BUF_TX_ED 0x00000004 /* excessive deferral (es) */
+# define BUF_TX_UF 0x00000002 /* underflow err (es) */
+# define BUF_TX_DB 0x00000001 /* deferred bit */
+
+/* TX buffer control bits */
+# define BUF_TX_IC 0x80000000 /* intrpt on compl */
+# define BUF_TX_LS 0x40000000 /* last segment */
+# define BUF_TX_FS 0x20000000 /* first segment */
+# define BUF_TX_CIC 0x18000000 /* cksum insert control */
+# define BUF_TX_CIC_SET(n) (BUF_TX_CIC&(n<<27))
+
+# define BUF_TX_DC 0x04000000 /* disable CRC */
+# define BUF_TX_TER 0x02000000 /* end of ring */
+# define BUF_TX_TCH 0x01000000 /* 2nd addr chained */
+
+# define BUF_TX_DP 0x00800000 /* disable padding */
+# define BUF_TX_TTSE 0x00400000 /* timestamp enable */
+
+# define BUF_TX_SIZ2 0x003ff800 /* buffer 2 size */
+# define BUF_TX_SIZ2_SET(n)	(BUF_TX_SIZ2 & ((n) << 11))
+
+# define BUF_TX_SIZ 0x000007ff /* buffer 1 size */
+# define BUF_TX_SIZ_SET(n)	(BUF_TX_SIZ & (n))
+
+
+/* Ethernet Controller Registers */
+# define BUS_MODE_REG 0x1000
+
+# define BUS_MODE_MB 0x04000000 /* mixed burst */
+# define BUS_MODE_AAL 0x02000000 /* address alignment beats */
+# define BUS_MODE_8XPBL	0x01000000 /* PBL values scaled by 8 */
+
+# define BUS_MODE_USP 0x00800000 /* use separate PBL */
+# define BUS_MODE_RPBL 0x007e0000 /* rxDMA PBL */
+# define BUS_MODE_FB 0x00010000 /* fixed burst */
+
+# define BUS_MODE_PR 0x0000c000 /* tx/rx priority */
+# define BUS_MODE_PR4 0x0000c000 /* tx/rx priority 4:1 */
+# define BUS_MODE_PR3 0x00008000 /* tx/rx priority 3:1 */
+# define BUS_MODE_PR2 0x00004000 /* tx/rx priority 2:1 */
+# define BUS_MODE_PR1 0x00000000 /* tx/rx priority 1:1 */
+
+# define BUS_MODE_PBL 0x00003f00 /* programmable burst length */
+# define BUS_MODE_PBLSET(n) (BUS_MODE_PBL&(n<<8))
+
+# define BUS_MODE_DSL 0x0000007c /* descriptor skip length */
+# define BUS_MODE_DSL_SET(n) (BUS_MODE_DSL & (n << 2))
+
+# define BUS_MODE_DA 0x00000002 /* DMA arbitration scheme */
+# define BUS_MODE_SWR 0x00000001 /* software reset */
+
+#define BUS_MODE_REG_DEFAULT (BUS_MODE_FB \
+ | BUS_MODE_AAL \
+ | BUS_MODE_PBLSET(16) \
+ | BUS_MODE_DA \
+ | BUS_MODE_DSL_SET(0))
+
+# define TX_POLL_DEM_REG 0x1004 /* transmit poll demand */
+# define RX_POLL_DEM_REG 0x1008 /* receive poll demand */
+
+# define RX_DES_LST_ADR_REG 0x100c /* receive buffer descriptor */
+# define TX_DES_LST_ADR_REG 0x1010 /* transmit buffer descriptor */
+
+# define STATUS_REG 0x1014
+
+# define STATUS_REG_RSVRD_1 0xc0000000 /* reserved */
+# define STATUS_REG_TTI 0x20000000 /* time-stamp trigger intrpt */
+# define STATUS_REG_GPI 0x10000000 /* gmac PMT interrupt */
+
+# define STATUS_REG_GMI 0x08000000 /* gmac MMC interrupt */
+# define STATUS_REG_GLI 0x04000000 /* gmac line interface intrpt */
+
+# define STATUS_REG_EB 0x03800000 /* error bits */
+# define STATUS_REG_EB_DATA 0x00800000 /* error during data transfer */
+# define STATUS_REG_EB_RDWR 0x01000000 /* error during rd/wr transfer */
+# define STATUS_REG_EB_DESC 0x02000000 /* error during desc access */
+
+# define STATUS_REG_TS 0x00700000 /* transmit process state */
+
+# define STATUS_REG_TS_STOP 0x00000000 /* stopped */
+# define STATUS_REG_TS_FETCH_DESC 0x00100000 /* fetching descriptor */
+# define STATUS_REG_TS_WAIT 0x00200000 /* waiting for status */
+# define STATUS_REG_TS_READ 0x00300000 /* reading host memory */
+# define STATUS_REG_TS_TIMESTAMP 0x00400000 /* timestamp write status */
+# define STATUS_REG_TS_RSVRD 0x00500000 /* reserved */
+# define STATUS_REG_TS_SUSPEND 0x00600000 /* desc-unavail/buffer-unflw */
+# define STATUS_REG_TS_CLOSE 0x00700000 /* closing desc */
+
+# define STATUS_REG_RS 0x000e0000 /* receive process state */
+
+# define STATUS_REG_RS_STOP 0x00000000 /* stopped */
+# define STATUS_REG_RS_FETCH_DESC 0x00020000 /* fetching descriptor */
+# define STATUS_REG_RS_RSVRD_1 0x00040000 /* reserved */
+# define STATUS_REG_RS_WAIT 0x00060000 /* waiting for packet */
+# define STATUS_REG_RS_SUSPEND 0x00080000 /* desc unavail */
+# define STATUS_REG_RS_CLOSE 0x000a0000 /* closing desc */
+# define STATUS_REG_RS_TIMESTAMP 0x000c0000 /* timestamp write status */
+# define STATUS_REG_RS_RSVRD_2 0x000e0000 /* writing host memory */
+
+# define STATUS_REG_NIS 0x00010000 /* normal intrpt 14|6|2|0 */
+# define STATUS_REG_AIS 0x00008000 /* intrpts 13|10|9|8|7|5|4|3|1 */
+
+# define STATUS_REG_ERI 0x00004000 /* early receive interrupt */
+# define STATUS_REG_FBI 0x00002000 /* fatal bus error interrupt */
+# define STATUS_REG_RSVRD_2 0x00001800 /* reserved */
+
+# define STATUS_REG_ETI 0x00000400 /* early transmit interrupt */
+# define STATUS_REG_RWT 0x00000200 /* receive watchdog timeout */
+# define STATUS_REG_RPS 0x00000100 /* receive process stopped */
+
+# define STATUS_REG_RU 0x00000080 /* receive buffer unavailable */
+# define STATUS_REG_RI 0x00000040 /* receive interrupt */
+# define STATUS_REG_UNF 0x00000020 /* transmit underflow */
+# define STATUS_REG_OVF 0x00000010 /* receive overflow */
+
+# define STATUS_REG_TJT 0x00000008 /* transmit jabber timeout */
+# define STATUS_REG_TU 0x00000004 /* transmit buffer unavailable */
+# define STATUS_REG_TPS 0x00000002 /* transmit process stopped */
+# define STATUS_REG_TI 0x00000001 /* transmit interrupt */
+
+# define STATUS_REG_AIS_BITS (STATUS_REG_FBI | STATUS_REG_ETI \
+ | STATUS_REG_RWT | STATUS_REG_RPS \
+ | STATUS_REG_RU | STATUS_REG_UNF \
+ | STATUS_REG_OVF | STATUS_REG_TJT \
+ | STATUS_REG_TPS | STATUS_REG_AIS)
+
+# define OPER_MODE_REG 0x1018
+
+# define OPER_MODE_REG_DT 0x04000000 /* disab drop ip cksum err fr */
+# define OPER_MODE_REG_RSF 0x02000000 /* rec store and forward */
+# define OPER_MODE_REG_DFF 0x01000000 /* disable flush of rec frames */
+
+# define OPER_MODE_REG_RFA2 0x00800000 /* thresh MSB for act flow-ctl */
+# define OPER_MODE_REG_RFD2 0x00400000 /* thresh MSB deAct flow-ctl */
+# define OPER_MODE_REG_TSF 0x00200000 /* tx store and forward */
+# define OPER_MODE_REG_FTF 0x00100000 /* flush tx FIFO */
+
+# define OPER_MODE_REG_RSVD1 0x000e0000 /* reserved */
+# define OPER_MODE_REG_TTC 0x0001c000 /* transmit threshold control */
+# define OPER_MODE_REG_TTC_SET(x) (OPER_MODE_REG_TTC & (x << 14))
+# define OPER_MODE_REG_ST 0x00002000 /* start/stop transmission cmd */
+
+# define OPER_MODE_REG_RFD 0x00001800 /* thresh for deAct flow-ctl */
+# define OPER_MODE_REG_RFA 0x00000600 /* threshold for act flow-ctl */
+# define OPER_MODE_REG_EFC 0x00000100 /* enable HW flow-ctl */
+
+# define OPER_MODE_REG_FEF 0x00000080 /* forward error frames */
+# define OPER_MODE_REG_FUF 0x00000040 /* forward undersize good fr */
+# define OPER_MODE_REG_RSVD2 0x00000020 /* reserved */
+# define OPER_MODE_REG_RTC 0x00000018 /* receive threshold control */
+# define OPER_MODE_REG_RTC_SET(x) (OPER_MODE_REG_RTC & (x << 3))
+
+# define OPER_MODE_REG_OSF 0x00000004 /* operate on second frame */
+# define OPER_MODE_REG_SR 0x00000002 /* start/stop receive */
+# define OPER_MODE_REG_RSVD3 0x00000001 /* reserved */
+
+
+#define OPER_MODE_REG_DEFAULT (OPER_MODE_REG_RSF \
+ | OPER_MODE_REG_TSF \
+ | OPER_MODE_REG_TTC_SET(5) \
+ | OPER_MODE_REG_RTC_SET(1) \
+ | OPER_MODE_REG_OSF)
+
+# define INTRP_EN_REG 0x101c
+
+# define INTRP_EN_REG_RSVD1 0xfffc0000 /* */
+# define INTRP_EN_REG_NIE 0x00010000 /* normal intrpt summ enable */
+
+# define INTRP_EN_REG_AIE 0x00008000 /* abnormal intrpt summary en */
+# define INTRP_EN_REG_ERE 0x00004000 /* early receive intrpt enable */
+# define INTRP_EN_REG_FBE 0x00002000 /* fatal bus error enable */
+
+# define INTRP_EN_REG_RSVD2 0x00001800 /* */
+
+# define INTRP_EN_REG_ETE 0x00000400 /* early tx intrpt enable */
+# define INTRP_EN_REG_RWE 0x00000200 /* rx watchdog timeout enable */
+# define INTRP_EN_REG_RSE 0x00000100 /* rx stopped enable */
+
+# define INTRP_EN_REG_RUE 0x00000080 /* rx buf unavailable enable */
+# define INTRP_EN_REG_RIE 0x00000040 /* rx interrupt enable */
+# define INTRP_EN_REG_UNE 0x00000020 /* underflow interrupt enable */
+# define INTRP_EN_REG_OVE 0x00000010 /* overflow interrupt enable */
+
+# define INTRP_EN_REG_TJE 0x00000008 /* tx jabber timeout enable */
+# define INTRP_EN_REG_TUE 0x00000004 /* tx buf unavailable enable */
+# define INTRP_EN_REG_TSE 0x00000002 /* tx stopped enable */
+# define INTRP_EN_REG_TIE 0x00000001 /* tx interrupt enable */
+
+# define INTRP_EN_REG_All (~(INTRP_EN_REG_RSVD1))
+
+# define MIS_FR_REG 0x1020
+
+# define MIS_FR_REG_FIFO_OVFL 0x10000000 /* fifo overflow */
+# define MIS_FR_REG_FIFO_CNT 0x0FFE0000 /* fifo cnt */
+
+# define MIS_FR_REG_MISS_OVFL 0x00010000 /* missed-frame overflow */
+# define MIS_FR_REG_MISS_CNT 0x0000FFFF /* missed-frame cnt */
+
+# define RX_INTRP_WTCHDOG_REG 0x1024
+# define AXI_BUS_MODE_REG 0x1028
+
+# define AXI_BUS_MODE_EN_LPI 0x80000000 /* enable low power interface */
+# define AXI_BUS_MODE_UNLK_MGC_PKT 0x40000000 /* unlock-magic-pkt/rem-wk-up */
+# define AXI_BUS_MODE_WR_OSR_LMT 0x00F00000 /* max wr out stndg req limit */
+# define AXI_BUS_MODE_RD_OSR_LMT 0x000F0000 /* max rd out stndg req limit */
+# define AXI_BUS_MODE_AXI_AAL 0x00001000 /* address aligned beats */
+# define AXI_BUS_MODE_BLEN256 0x00000080 /* axi burst length 256 */
+# define AXI_BUS_MODE_BLEN128 0x00000040 /* axi burst length 128 */
+# define AXI_BUS_MODE_BLEN64 0x00000020 /* axi burst length 64 */
+# define AXI_BUS_MODE_BLEN32 0x00000010 /* axi burst length 32 */
+# define AXI_BUS_MODE_BLEN16 0x00000008 /* axi burst length 16 */
+# define AXI_BUS_MODE_BLEN8 0x00000004 /* axi burst length 8 */
+# define AXI_BUS_MODE_BLEN4 0x00000002 /* axi burst length 4 */
+# define AXI_BUS_MODE_UNDEF 0x00000001 /* axi undef burst length */
+
+#define AXI_BUS_MODE_DEFAULT (AXI_BUS_MODE_WR_OSR_LMT \
+ | AXI_BUS_MODE_RD_OSR_LMT \
+ | AXI_BUS_MODE_BLEN16 \
+ | AXI_BUS_MODE_BLEN8 \
+ | AXI_BUS_MODE_BLEN4)
+
+# define AXI_STATUS_REG 0x102c
+
+/* 0x1030-0x1044 reserved */
+# define CUR_HOST_TX_DES_REG 0x1048
+# define CUR_HOST_RX_DES_REG 0x104c
+# define CUR_HOST_TX_BU_ADR_REG 0x1050
+# define CUR_HOST_RX_BU_ADR_REG 0x1054
+
+# define HW_FEATURE_REG 0x1058
+
+# define MAC_CONFIG_REG 0x0000
+
+# define MAC_CONFIG_REG_RSVD1 0xf8000000 /* */
+
+# define MAC_CONFIG_REG_SFTERR 0x04000000 /* smii force tx error */
+# define MAC_CONFIG_REG_CST 0x02000000 /* crc strip for type frame */
+# define MAC_CONFIG_REG_TC 0x01000000 /* tx cfg in rgmii/sgmii/smii */
+
+# define MAC_CONFIG_REG_WD 0x00800000 /* watchdog disable */
+# define MAC_CONFIG_REG_JD 0x00400000 /* jabber disable */
+# define MAC_CONFIG_REG_BE 0x00200000 /* frame burst enable */
+# define MAC_CONFIG_REG_JE 0x00100000 /* jumbo frame enable */
+
+# define MAC_CONFIG_REG_IFG 0x000e0000 /* inter frame gap, 96-(8*n) */
+# define MAC_CONFIG_REG_DCRS 0x00010000 /* dis carrier sense during tx */
+
+# define MAC_CONFIG_REG_PS 0x00008000 /* port select: 0/1 g/(10/100) */
+# define MAC_CONFIG_REG_FES 0x00004000 /* speed 100 mbps */
+# define MAC_CONFIG_REG_SPD (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES)
+# define MAC_CONFIG_REG_SPD_1G (0)
+# define MAC_CONFIG_REG_SPD_100 (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES)
+# define MAC_CONFIG_REG_SPD_10 (MAC_CONFIG_REG_PS)
+# define MAC_CONFIG_REG_SPD_SET(x) (MAC_CONFIG_REG_SPD & ((x) << 14))
+
+# define MAC_CONFIG_REG_DO 0x00002000 /* disable receive own */
+# define MAC_CONFIG_REG_LM 0x00001000 /* loopback mode */
+
+# define MAC_CONFIG_REG_DM 0x00000800 /* (full) duplex mode */
+# define MAC_CONFIG_REG_IPC 0x00000400 /* checksum offload */
+# define MAC_CONFIG_REG_DR 0x00000200 /* disable retry */
+# define MAC_CONFIG_REG_LUD 0x00000100 /* link up/down */
+
+# define MAC_CONFIG_REG_ACS 0x00000080 /* auto pad/crc stripping */
+# define MAC_CONFIG_REG_BL 0x00000060 /* back-off limit */
+# define MAC_CONFIG_REG_BL_10 0x00000000 /* 10 */
+# define MAC_CONFIG_REG_BL_8 0x00000020 /* 8 */
+# define MAC_CONFIG_REG_BL_4 0x00000040 /* 4 */
+# define MAC_CONFIG_REG_BL_1 0x00000060 /* 1 */
+# define MAC_CONFIG_REG_DC 0x00000010 /* deferral check */
+
+# define MAC_CONFIG_REG_TE 0x00000008 /* transmitter enable */
+# define MAC_CONFIG_REG_RE 0x00000004 /* receiver enable */
+# define MAC_CONFIG_REG_RSVD2 0x00000003 /* */
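+
+/*
+ * Usage sketch (illustrative only, not part of the driver API):
+ * composing a typical MAC configuration from the bits above --
+ * 100 Mbps, full duplex, transmitter and receiver enabled.
+ */
+static inline u32 qfec_mac_config_100fd(void)
+{
+	return MAC_CONFIG_REG_SPD_100 | MAC_CONFIG_REG_DM
+		| MAC_CONFIG_REG_TE | MAC_CONFIG_REG_RE;
+}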
+
+# define MAC_FR_FILTER_REG 0x0004
+
+# define MAC_FR_FILTER_RA 0x80000000 /* receive all */
+
+# define MAC_FR_FILTER_HPF 0x00000400 /* hash or perfect filter */
+# define MAC_FR_FILTER_SAF 0x00000200 /* source addr filt en */
+# define MAC_FR_FILTER_SAIF 0x00000100 /* SA inverse filter */
+# define MAC_FR_FILTER_PCF_MASK 0x000000c0 /* pass control frames */
+# define MAC_FR_FILTER_PCF_0 0x00000000 /* */
+# define MAC_FR_FILTER_PCF_1 0x00000040 /* */
+# define MAC_FR_FILTER_PCF_2 0x00000080 /* */
+# define MAC_FR_FILTER_PCF_3 0x000000c0 /* */
+# define MAC_FR_FILTER_DBF 0x00000020 /* disable broadcast frames */
+# define MAC_FR_FILTER_PM 0x00000010 /* pass all multicast */
+# define MAC_FR_FILTER_DAIF 0x00000008 /* DA inverse filtering */
+# define MAC_FR_FILTER_HMC 0x00000004 /* hash multicast */
+# define MAC_FR_FILTER_HUC 0x00000002 /* hash unicast */
+# define MAC_FR_FILTER_PR 0x00000001 /* promiscuous mode */
+
+# define HASH_TABLE_HIGH_REG 0x0008
+# define HASH_TABLE_LOW_REG 0x000c
+
+# define GMII_ADR_REG 0x0010
+
+# define GMII_ADR_REG_PA 0x0000f800 /* phy address */
+# define GMII_ADR_REG_GR 0x000007c0 /* gmii register */
+# define GMII_ADR_REG_RSVRD1 0x00000020 /* */
+# define GMII_ADR_REG_CR 0x0000001c /* csr clock range */
+# define GMII_ADR_REG_GW 0x00000002 /* gmii write */
+# define GMII_ADR_REG_GB 0x00000001 /* gmii busy */
+
+# define GMII_ADR_REG_ADR_SET(x) (GMII_ADR_REG_PA & ((x) << 11))
+# define GMII_ADR_REG_ADR_GET(x) (((x) & GMII_ADR_REG_PA) >> 11)
+
+# define GMII_ADR_REG_REG_SET(x) (GMII_ADR_REG_GR & ((x) << 6))
+# define GMII_ADR_REG_REG_GET(x) (((x) & GMII_ADR_REG_GR) >> 6)
+
+# define GMII_ADR_REG_CSR_SET(x) (GMII_ADR_REG_CR & ((x) << 2))
+# define GMII_ADR_REG_CSR_GET(x) (((x) & GMII_ADR_REG_CR) >> 2)
+
+# define GMII_DATA_REG 0x0014
+
+# define GMII_DATA_REG_DATA 0x0000ffff /* gmii data */
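+
+/*
+ * Usage sketch (illustrative only): compose the GMII address word for
+ * an MDIO read of PHY register 'reg' on PHY 'phy'. A caller would
+ * write this to GMII_ADR_REG, poll until GMII_ADR_REG_GB clears, then
+ * read the result from GMII_DATA_REG (set GMII_ADR_REG_GW for writes).
+ */
+static inline u32 qfec_mdio_read_cmd(u32 phy, u32 reg, u32 csr_range)
+{
+	return GMII_ADR_REG_ADR_SET(phy) | GMII_ADR_REG_REG_SET(reg)
+		| GMII_ADR_REG_CSR_SET(csr_range) | GMII_ADR_REG_GB;
+}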
+
+# define FLOW_CONTROL_REG 0x0018
+
+# define FLOW_CONTROL_PT 0xFFFF0000 /* pause time */
+# define FLOW_CONTROL_DZPQ 0x00000080 /* disable zero-quanta pause */
+# define FLOW_CONTROL_PLT 0x00000030 /* pause level threshold */
+
+# define FLOW_CONTROL_UP 0x00000008 /* unicast pause frame detect */
+# define FLOW_CONTROL_RFE 0x00000004 /* receive flow control enable */
+# define FLOW_CONTROL_TFE 0x00000002 /* transmit flow control enable */
+# define FLOW_CONTROL_FCB 0x00000001 /* flow control busy (BPA) */
+
+# define VLAN_TAG_REG 0x001c
+
+# define VERSION_REG 0x0020
+
+/* don't define these until HW is finished */
+/* # define VERSION_USER 0x10 */
+/* # define VERSION_QFEC 0x36 */
+
+# define VERSION_REG_USER(x) (0xFF & ((x) >> 8))
+# define VERSION_REG_QFEC(x) (0xFF & (x))
+
+# define DEBUG_REG 0x0024
+
+# define DEBUG_REG_RSVD1 0xfc000000 /* */
+# define DEBUG_REG_TX_FIFO_FULL 0x02000000 /* Tx fifo full */
+# define DEBUG_REG_TX_FIFO_NEMP 0x01000000 /* Tx fifo not empty */
+
+# define DEBUG_REG_RSVD2 0x00800000 /* */
+# define DEBUG_REG_TX_WR_ACTIVE 0x00400000 /* Tx fifo write ctrl active */
+
+# define DEBUG_REG_TX_RD_STATE 0x00300000 /* Tx fifo rd ctrl state */
+# define DEBUG_REG_TX_RD_IDLE 0x00000000 /* idle */
+# define DEBUG_REG_TX_RD_WAIT 0x00100000 /* waiting for status */
+# define DEBUG_REG_TX_RD_PAUSE 0x00200000 /* generating pause */
+# define DEBUG_REG_TX_RD_WRTG 0x00300000 /* wr stat flush fifo */
+
+# define DEBUG_REG_TX_PAUSE 0x00080000 /* Tx in pause condition */
+
+# define DEBUG_REG_TX_CTRL_STATE 0x00060000 /* Tx frame controller state */
+# define DEBUG_REG_TX_CTRL_IDLE 0x00000000 /* idle */
+# define DEBUG_REG_TX_CTRL_WAIT 0x00020000 /* waiting for status */
+# define DEBUG_REG_TX_CTRL_PAUSE 0x00040000 /* generating pause */
+# define DEBUG_REG_TX_CTRL_XFER 0x00060000 /* transferring input */
+
+# define DEBUG_REG_TX_ACTIVE 0x00010000 /* Tx actively transmitting */
+# define DEBUG_REG_RSVD3 0x0000fc00 /* */
+
+# define DEBUG_REG_RX_STATE 0x00000300 /* Rx fifo state */
+# define DEBUG_REG_RX_EMPTY 0x00000000 /* empty */
+# define DEBUG_REG_RX_LOW 0x00000100 /* below threshold */
+# define DEBUG_REG_RX_HIGH 0x00000200 /* above threshold */
+# define DEBUG_REG_RX_FULL 0x00000300 /* full */
+
+# define DEBUG_REG_RSVD4 0x00000080 /* */
+
+# define DEBUG_REG_RX_RD_STATE 0x00000060 /* Rx rd ctrl state */
+# define DEBUG_REG_RX_RD_IDLE 0x00000000 /* idle */
+# define DEBUG_REG_RX_RD_RDG_FR 0x00000020 /* reading frame data */
+# define DEBUG_REG_RX_RD_RDG_STA 0x00000040 /* reading status */
+# define DEBUG_REG_RX_RD_FLUSH 0x00000060 /* flush fr data/stat */
+
+# define DEBUG_REG_RX_ACTIVE 0x00000010 /* Rx wr ctlr active */
+
+# define DEBUG_REG_RSVD5 0x00000008 /* */
+# define DEBUG_REG_SM_FIFO_RW_STA 0x00000006 /* small fifo rd/wr state */
+# define DEBUG_REG_RX_RECVG 0x00000001 /* Rx actively receiving data */
+
+# define REM_WAKEUP_FR_REG 0x0028
+# define PMT_CTRL_STAT_REG 0x002c
+/* 0x0030-0x0034 reserved */
+
+# define INTRP_STATUS_REG 0x0038
+
+# define INTRP_STATUS_REG_RSVD1 0x0000fc00 /* */
+# define INTRP_STATUS_REG_TSI 0x00000200 /* time stamp int stat */
+# define INTRP_STATUS_REG_RSVD2 0x00000100 /* */
+
+# define INTRP_STATUS_REG_RCOI 0x00000080 /* rec checksum offload int */
+# define INTRP_STATUS_REG_TI 0x00000040 /* tx int stat */
+# define INTRP_STATUS_REG_RI 0x00000020 /* rx int stat */
+# define INTRP_STATUS_REG_NI 0x00000010 /* normal int summary */
+
+# define INTRP_STATUS_REG_PMTI 0x00000008 /* PMT int */
+# define INTRP_STATUS_REG_ANC 0x00000004 /* auto negotiation complete */
+# define INTRP_STATUS_REG_LSC 0x00000002 /* link status change */
+# define INTRP_STATUS_REG_MII 0x00000001 /* rgMii/sgMii int */
+
+# define INTRP_MASK_REG 0x003c
+
+# define INTRP_MASK_REG_RSVD1 0xfc00 /* */
+# define INTRP_MASK_REG_TSIM 0x0200 /* time stamp int mask */
+# define INTRP_MASK_REG_RSVD2 0x01f0 /* */
+
+# define INTRP_MASK_REG_PMTIM 0x0008 /* PMT int mask */
+# define INTRP_MASK_REG_ANCM 0x0004 /* auto negotiation compl mask */
+# define INTRP_MASK_REG_LSCM 0x0002 /* link status change mask */
+# define INTRP_MASK_REG_MIIM 0x0001 /* rgMii/sgMii int mask */
+
+# define MAC_ADR_0_HIGH_REG 0x0040
+# define MAC_ADR_0_LOW_REG 0x0044
+/* additional pairs of registers for MAC addresses 1-15 */
+# define MAC_ADR_HIGH_REG_N(n) (((n) < 16) ? \
+ (MAC_ADR_0_HIGH_REG + (n) * 8) : \
+ (MAC_ADR16_HIGH_REG + ((n) - 16) * 8))
+# define MAC_ADR_LOW_REG_N(n) (((n) < 16) ? \
+ (MAC_ADR_0_LOW_REG + (n) * 8) : \
+ (MAC_ADR16_LOW_REG + ((n) - 16) * 8))
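+
+/*
+ * Usage sketch (illustrative only): program MAC address slot 'n' from
+ * a 6-byte address. The split -- bytes 0-3 in the low register,
+ * bytes 4-5 in the high register -- follows the usual GMAC convention
+ * and is an assumption here.
+ */
+static inline void qfec_set_mac_addr_n(void __iomem *base,
+	unsigned int n, const u8 *a)
+{
+	iowrite32(a[4] | (a[5] << 8), base + MAC_ADR_HIGH_REG_N(n));
+	iowrite32(a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24),
+		  base + MAC_ADR_LOW_REG_N(n));
+}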
+
+# define AN_CONTROL_REG 0x00c0
+
+# define AN_CONTROL_REG_RSVRD1 0xfff80000 /* */
+# define AN_CONTROL_REG_SGM_RAL 0x00040000 /* sgmii ral control */
+# define AN_CONTROL_REG_LR 0x00020000 /* lock to reference */
+# define AN_CONTROL_REG_ECD 0x00010000 /* enable comma detect */
+
+# define AN_CONTROL_REG_RSVRD2 0x00008000 /* */
+# define AN_CONTROL_REG_ELE 0x00004000 /* external loopback enable */
+# define AN_CONTROL_REG_RSVRD3 0x00002000 /* */
+# define AN_CONTROL_REG_ANE 0x00001000 /* auto negotiation enable */
+
+# define AN_CONTROL_REG_RSVRD4 0x00000c00 /* */
+# define AN_CONTROL_REG_RAN 0x00000200 /* restart auto negotiation */
+# define AN_CONTROL_REG_RSVRD5 0x000001ff /* */
+
+# define AN_STATUS_REG 0x00c4
+
+# define AN_STATUS_REG_RSVRD1 0xfffffe00 /* */
+# define AN_STATUS_REG_ES 0x00000100 /* extended status */
+# define AN_STATUS_REG_RSVRD2 0x000000c0 /* */
+# define AN_STATUS_REG_ANC 0x00000020 /* auto-negotiation complete */
+# define AN_STATUS_REG_RSVRD3 0x00000010 /* */
+# define AN_STATUS_REG_ANA 0x00000008 /* auto-negotiation ability */
+# define AN_STATUS_REG_LS 0x00000004 /* link status */
+# define AN_STATUS_REG_RSVRD4 0x00000003 /* */
+
+# define AN_ADVERTISE_REG 0x00c8
+# define AN_LNK_PRTNR_ABIL_REG 0x00cc
+# define AN_EXPANSION_REG 0x00d0
+# define TBI_EXT_STATUS_REG 0x00d4
+
+# define SG_RG_SMII_STATUS_REG 0x00d8
+
+# define LINK_STATUS_REG 0x00d8
+
+# define LINK_STATUS_REG_RSVRD1 0xffffffc0 /* */
+# define LINK_STATUS_REG_FCD 0x00000020 /* false carrier detect */
+# define LINK_STATUS_REG_JT 0x00000010 /* jabber timeout */
+# define LINK_STATUS_REG_UP 0x00000008 /* link status */
+
+# define LINK_STATUS_REG_SPD 0x00000006 /* link speed */
+# define LINK_STATUS_REG_SPD_2_5 0x00000000 /* 10 Mbps (2.5 MHz x 4) */
+# define LINK_STATUS_REG_SPD_25 0x00000002 /* 100 Mbps (25 MHz x 4) */
+# define LINK_STATUS_REG_SPD_125 0x00000004 /* 1 Gbps (125 MHz x 8) */
+
+# define LINK_STATUS_REG_F_DUPLEX 0x00000001 /* full duplex */
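+
+/*
+ * Usage sketch (illustrative only): decode the LINK_STATUS_REG speed
+ * field into Mbps.
+ */
+static inline int qfec_link_speed_mbps(u32 status)
+{
+	switch (status & LINK_STATUS_REG_SPD) {
+	case LINK_STATUS_REG_SPD_2_5:
+		return 10;
+	case LINK_STATUS_REG_SPD_25:
+		return 100;
+	case LINK_STATUS_REG_SPD_125:
+		return 1000;
+	default:
+		return 0;
+	}
+}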
+
+/* 0x00dc-0x00fc reserved */
+
+/* MMC Register Map is from 0x0100-0x02fc */
+# define MMC_CNTRL_REG 0x0100
+# define MMC_INTR_RX_REG 0x0104
+# define MMC_INTR_TX_REG 0x0108
+# define MMC_INTR_MASK_RX_REG 0x010C
+# define MMC_INTR_MASK_TX_REG 0x0110
+# define NUM_MULTCST_FRM_RCVD_G 0x0190
+
+/* 0x0300-0x06fc reserved */
+
+/* precision time protocol time stamp registers */
+
+# define TS_CTL_REG 0x0700
+
+# define TS_CTL_ATSFC 0x00080000
+# define TS_CTL_TSENMAC 0x00040000
+
+# define TS_CTL_TSCLKTYPE 0x00030000
+# define TS_CTL_TSCLK_ORD 0x00000000
+# define TS_CTL_TSCLK_BND 0x00010000
+# define TS_CTL_TSCLK_ETE 0x00020000
+# define TS_CTL_TSCLK_PTP 0x00030000
+
+# define TS_CTL_TSMSTRENA 0x00008000
+# define TS_CTL_TSEVNTENA 0x00004000
+# define TS_CTL_TSIPV4ENA 0x00002000
+# define TS_CTL_TSIPV6ENA 0x00001000
+
+# define TS_CTL_TSIPENA 0x00000800
+# define TS_CTL_TSVER2ENA 0x00000400
+# define TS_CTL_TSCTRLSSR 0x00000200
+# define TS_CTL_TSENALL 0x00000100
+
+# define TS_CTL_TSADDREG 0x00000020
+# define TS_CTL_TSTRIG 0x00000010
+
+# define TS_CTL_TSUPDT 0x00000008
+# define TS_CTL_TSINIT 0x00000004
+# define TS_CTL_TSCFUPDT 0x00000002
+# define TS_CTL_TSENA 0x00000001
+
+# define TS_SUB_SEC_INCR_REG 0x0704
+# define TS_HIGH_REG 0x0708
+# define TS_LOW_REG 0x070c
+# define TS_HI_UPDT_REG 0x0710
+# define TS_LO_UPDT_REG 0x0714
+# define TS_APPEND_REG 0x0718
+# define TS_TARG_TIME_HIGH_REG 0x071c
+# define TS_TARG_TIME_LOW_REG 0x0720
+# define TS_HIGHER_WD_REG 0x0724
+# define TS_STATUS_REG 0x072c
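+
+/*
+ * Usage sketch (illustrative only; the exact enable sequence is an
+ * assumption): turn on timestamping with fine correction for all
+ * frames and initialize the PTP clock.
+ */
+static inline void qfec_ptp_enable(void __iomem *base)
+{
+	iowrite32(TS_CTL_TSENA | TS_CTL_TSCFUPDT | TS_CTL_TSENALL
+		  | TS_CTL_TSINIT, base + TS_CTL_REG);
+}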
+
+/* 0x0730-0x07fc reserved */
+
+# define MAC_ADR16_HIGH_REG 0x0800
+# define MAC_ADR16_LOW_REG 0x0804
+/* additional pairs of registers for MAC addresses 17-31 */
+
+# define MAC_ADR_MAX 32
+
+# define QFEC_INTRP_SETUP (INTRP_EN_REG_AIE \
+ | INTRP_EN_REG_FBE \
+ | INTRP_EN_REG_RWE \
+ | INTRP_EN_REG_RSE \
+ | INTRP_EN_REG_RUE \
+ | INTRP_EN_REG_UNE \
+ | INTRP_EN_REG_OVE \
+ | INTRP_EN_REG_TJE \
+ | INTRP_EN_REG_TSE \
+ | INTRP_EN_REG_NIE \
+ | INTRP_EN_REG_RIE \
+ | INTRP_EN_REG_TIE)
+
+/*
+ * ASIC Ethernet clock register definitions:
+ * address offsets and some register definitions
+ */
+
+# define EMAC_CLK_REG_BASE 0x94020000
+
+/*
+ * PHY clock PLL register locations
+ */
+# define ETH_MD_REG 0x02A4
+# define ETH_NS_REG 0x02A8
+
+/* definitions of NS_REG control bits
+ */
+# define ETH_NS_SRC_SEL 0x0007
+
+# define ETH_NS_PRE_DIV_MSK 0x0018
+# define ETH_NS_PRE_DIV(x) (ETH_NS_PRE_DIV_MSK & ((x) << 3))
+
+# define ETH_NS_MCNTR_MODE_MSK 0x0060
+# define ETH_NS_MCNTR_MODE_BYPASS 0x0000
+# define ETH_NS_MCNTR_MODE_SWALLOW 0x0020
+# define ETH_NS_MCNTR_MODE_DUAL 0x0040
+# define ETH_NS_MCNTR_MODE_SINGLE 0x0060
+
+# define ETH_NS_MCNTR_RST 0x0080
+# define ETH_NS_MCNTR_EN 0x0100
+
+# define EMAC_PTP_NS_CLK_EN 0x0200
+# define EMAC_PTP_NS_CLK_INV 0x0400
+# define EMAC_PTP_NS_ROOT_EN 0x0800
+
+/* clock sources
+ */
+# define CLK_SRC_TCXO 0x0
+# define CLK_SRC_PLL_GLOBAL 0x1
+# define CLK_SRC_PLL_ARM 0x2
+# define CLK_SRC_PLL_QDSP6 0x3
+# define CLK_SRC_PLL_EMAC 0x4
+# define CLK_SRC_EXT_CLK2 0x5
+# define CLK_SRC_EXT_CLK1 0x6
+# define CLK_SRC_CORE_TEST 0x7
+
+# define ETH_MD_M(x) ((x) << 16)
+# define ETH_MD_2D_N(x) (~(x) & 0xffff)
+# define ETH_NS_NM(x) ((~(x) << 16) & 0xffff0000)
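+
+/*
+ * Usage sketch (illustrative only): the helpers above appear to follow
+ * the usual MSM M/N counter encoding, so programming a divider with
+ * ratio m/n might look like this; the exact pairing of the MD and NS
+ * fields is an assumption.
+ */
+static inline void qfec_phy_div_set(void __iomem *clk_base, u32 m, u32 n)
+{
+	u32 ns = ioread32(clk_base + ETH_NS_REG);
+
+	iowrite32(ETH_MD_M(m) | ETH_MD_2D_N(n), clk_base + ETH_MD_REG);
+	ns &= ~0xffff0000;
+	ns |= ETH_NS_NM(n - m);
+	iowrite32(ns, clk_base + ETH_NS_REG);
+}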
+
+/*
+ * PHY interface clock divider
+ */
+# define ETH_X_EN_NS_REG 0x02AC
+
+# define ETH_RX_CLK_FB_INV 0x80
+# define ETH_RX_CLK_FB_EN 0x40
+# define ETH_TX_CLK_FB_INV 0x20
+# define ETH_TX_CLK_FB_EN 0x10
+# define ETH_RX_CLK_INV 0x08
+# define ETH_RX_CLK_EN 0x04
+# define ETH_TX_CLK_INV 0x02
+# define ETH_TX_CLK_EN 0x01
+
+# define ETH_X_EN_NS_DEFAULT \
+ (ETH_RX_CLK_FB_EN | ETH_TX_CLK_FB_EN | ETH_RX_CLK_EN | ETH_TX_CLK_EN)
+
+# define EMAC_PTP_MD_REG 0x02B0
+
+/* PTP clock divider
+ */
+# define EMAC_PTP_NS_REG 0x02B4
+
+/*
+ * clock interface pin controls
+ */
+# define EMAC_NS_REG 0x02B8
+
+# define EMAC_RX_180_CLK_INV 0x2000
+# define EMAC_RX_180_CLK_EN 0x1000
+# define EMAC_RX_180_CLK_EN_INV (EMAC_RX_180_CLK_INV | EMAC_RX_180_CLK_EN)
+
+# define EMAC_TX_180_CLK_INV 0x0800
+# define EMAC_TX_180_CLK_EN 0x0400
+# define EMAC_TX_180_CLK_EN_INV (EMAC_TX_180_CLK_INV | EMAC_TX_180_CLK_EN)
+
+# define EMAC_REVMII_RX_CLK_INV 0x0200
+# define EMAC_REVMII_RX_CLK_EN 0x0100
+
+# define EMAC_RX_CLK_INV 0x0080
+# define EMAC_RX_CLK_EN 0x0040
+
+# define EMAC_REVMII_TX_CLK_INV 0x0020
+# define EMAC_REVMII_TX_CLK_EN 0x0010
+
+# define EMAC_TX_CLK_INV 0x0008
+# define EMAC_TX_CLK_EN 0x0004
+
+# define EMAC_RX_R_CLK_EN 0x0002
+# define EMAC_TX_R_CLK_EN 0x0001
+
+# define EMAC_NS_DEFAULT \
+ (EMAC_RX_180_CLK_EN_INV | EMAC_TX_180_CLK_EN_INV \
+ | EMAC_REVMII_RX_CLK_EN | EMAC_REVMII_TX_CLK_EN \
+ | EMAC_RX_CLK_EN | EMAC_TX_CLK_EN \
+ | EMAC_RX_R_CLK_EN | EMAC_TX_R_CLK_EN)
+
+# define EMAC_TX_FS_REG 0x02BC
+# define EMAC_RX_FS_REG 0x02C0
+
+/*
+ * Ethernet controller PHY interface select
+ */
+# define EMAC_PHY_INTF_SEL_REG 0x18030
+
+# define EMAC_PHY_INTF_SEL_MII 0x0
+# define EMAC_PHY_INTF_SEL_RGMII 0x1
+# define EMAC_PHY_INTF_SEL_REVMII 0x7
+# define EMAC_PHY_INTF_SEL_MASK 0x7
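+
+/*
+ * Usage sketch (illustrative only): select the RGMII PHY interface
+ * with a read-modify-write of the select field.
+ */
+static inline void qfec_intf_sel_rgmii(void __iomem *clk_base)
+{
+	u32 v = ioread32(clk_base + EMAC_PHY_INTF_SEL_REG);
+
+	v &= ~EMAC_PHY_INTF_SEL_MASK;
+	v |= EMAC_PHY_INTF_SEL_RGMII;
+	iowrite32(v, clk_base + EMAC_PHY_INTF_SEL_REG);
+}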
+
+/*
+ * MDIO addresses
+ */
+# define EMAC_PHY_ADDR_REG 0x18034
+# define EMAC_REVMII_PHY_ADDR_REG 0x18038
+
+/*
+ * clock routing
+ */
+# define EMAC_CLKMUX_SEL_REG 0x1803c
+
+# define EMAC_CLKMUX_SEL_0 0x1
+# define EMAC_CLKMUX_SEL_1 0x2
+
+#endif
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5f53fbb..1e1617e 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -223,6 +223,20 @@
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */
+#elif defined(CONFIG_ARCH_MSM)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_NOWAIT 1
+
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
+
#elif defined(CONFIG_MN10300)
/*
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index cd3defb..b0d4c4c 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -44,6 +44,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/timer.h>
@@ -954,7 +955,7 @@
(!pdata->using_extphy)) {
/* Restore original GPIO configuration */
pdata->gpio_setting = pdata->gpio_orig_setting;
- smsc911x_reg_write(pdata, GPIO_CFG,
+ smsc911x_reg_write(pdata, SMSC_GPIO_CFG,
pdata->gpio_setting);
}
} else {
@@ -962,7 +963,7 @@
/* Check global setting that LED1
* usage is 10/100 indicator */
pdata->gpio_setting = smsc911x_reg_read(pdata,
- GPIO_CFG);
+ SMSC_GPIO_CFG);
if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
/* Force 10/100 LED off, after saving
@@ -973,7 +974,7 @@
pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_
| GPIO_CFG_GPIODIR0_
| GPIO_CFG_GPIOD0_);
- smsc911x_reg_write(pdata, GPIO_CFG,
+ smsc911x_reg_write(pdata, SMSC_GPIO_CFG,
pdata->gpio_setting);
}
}
@@ -1485,7 +1486,7 @@
SMSC_WARN(pdata, ifup,
"Timed out waiting for EEPROM busy bit to clear");
- smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
+ smsc911x_reg_write(pdata, SMSC_GPIO_CFG, 0x70070000);
/* The soft reset above cleared the device's MAC address,
* restore it from local copy (set in probe) */
@@ -1931,9 +1932,9 @@
static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata)
{
- unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG);
+ unsigned int temp = smsc911x_reg_read(pdata, SMSC_GPIO_CFG);
temp &= ~GPIO_CFG_EEPR_EN_;
- smsc911x_reg_write(pdata, GPIO_CFG, temp);
+ smsc911x_reg_write(pdata, SMSC_GPIO_CFG, temp);
msleep(1);
}
@@ -2241,6 +2242,12 @@
SMSC_TRACE(pdata, ifdown, "Stopping driver");
+ if (pdata->config.has_reset_gpio) {
+ gpio_set_value_cansleep(pdata->config.reset_gpio, 0);
+ gpio_free(pdata->config.reset_gpio);
+ }
+
phy_disconnect(pdata->phy_dev);
pdata->phy_dev = NULL;
mdiobus_unregister(pdata->mii_bus);
@@ -2436,9 +2443,10 @@
smsc911x_reg_write(pdata, INT_EN, 0);
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
- retval = request_irq(dev->irq, smsc911x_irqhandler,
- irq_flags | IRQF_SHARED, dev->name, dev);
- if (retval) {
+ retval = request_any_context_irq(dev->irq, smsc911x_irqhandler,
+ irq_flags | IRQF_SHARED, dev->name,
+ dev);
+ if (retval < 0) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
goto out_disable_resources;
@@ -2528,6 +2536,10 @@
PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+	/* Drive the Ethernet_Reset GPIO line low for suspend */
+ if (pdata->config.has_reset_gpio)
+ gpio_set_value_cansleep(pdata->config.reset_gpio, 0);
+
return 0;
}
@@ -2537,6 +2549,10 @@
struct smsc911x_data *pdata = netdev_priv(ndev);
unsigned int to = 100;
+ if (pdata->config.has_reset_gpio)
+ gpio_set_value_cansleep(pdata->config.reset_gpio, 1);
+
/* Note 3.11 from the datasheet:
* "When the LAN9220 is in a power saving state, a write of any
* data to the BYTE_TEST register will wake-up the device."
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 9ad5e5d..43e5398 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -236,7 +236,7 @@
#define PMT_CTRL_PME_EN_ 0x00000002
#define PMT_CTRL_READY_ 0x00000001
-#define GPIO_CFG 0x88
+#define SMSC_GPIO_CFG 0x88
#define GPIO_CFG_LED3_EN_ 0x40000000
#define GPIO_CFG_LED2_EN_ 0x20000000
#define GPIO_CFG_LED1_EN_ 0x10000000