Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 906ef8f..f6c44c6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2561,6 +2561,15 @@
 	  ML7223 is companion chip for Intel Atom E6xx series.
 	  ML7223 is completely compatible for Intel EG20T PCH.
 
+config QFEC
+	tristate "QFEC ethernet driver"
+	select MII
+	depends on ARM
+	help
+	  This driver supports Ethernet in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called qfec.
+
 endif # NETDEV_1000
 
 #
@@ -3431,6 +3440,38 @@
 	If you want to log kernel messages over the network, enable this.
 	See <file:Documentation/networking/netconsole.txt> for details.
 
+config MSM_RMNET
+	tristate "MSM RMNET Virtual Network Device"
+	depends on ARCH_MSM
+	default y
+	help
+	  Virtual ethernet interface for MSM RMNET transport.
+
+config MSM_RMNET_SDIO
+	bool "RMNET SDIO Driver"
+	depends on MSM_SDIO_DMUX
+	default n
+	help
+	  Implements RMNET over SDIO interface.
+
+config MSM_RMNET_BAM
+	bool "RMNET BAM Driver"
+	depends on MSM_BAM_DMUX
+	default n
+	help
+	  Implements RMNET over BAM interface.
+	  RMNET provides a virtual ethernet interface
+	  for routing IP packets within the MSM using
+	  BAM as a physical transport.
+
+config MSM_RMNET_DEBUG
+	bool "MSM RMNET debug interface"
+	depends on MSM_RMNET
+	default n
+	help
+	  Debug stats on wakeup counts.
+
+
 config NETCONSOLE_DYNAMIC
 	bool "Dynamic reconfiguration of logging targets"
 	depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 13ef4df..43079b3 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -269,6 +269,7 @@
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_MACB) += macb.o
 obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_QFEC) += qfec.o
 
 obj-$(CONFIG_ARM) += arm/
 obj-$(CONFIG_DEV_APPLETALK) += appletalk/
@@ -298,6 +299,11 @@
 obj-$(CONFIG_FS_ENET) += fs_enet/
 
 obj-$(CONFIG_NETXEN_NIC) += netxen/
+
+obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o
+obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o
+obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o
+
 obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_SFC) += sfc/
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index bcd9ba6..f79dce2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -21,7 +21,7 @@
 #include <linux/cache.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
-
+#include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
 #include "ks8851.h"
@@ -127,6 +127,8 @@
 	struct spi_message	spi_msg2;
 	struct spi_transfer	spi_xfer1;
 	struct spi_transfer	spi_xfer2[2];
+	struct regulator	*vdd_io;
+	struct regulator	*vdd_phy;
 };
 
 static int msg_enable;
@@ -1592,6 +1594,15 @@
 
 	ks = netdev_priv(ndev);
 
+	ks->vdd_io = regulator_get(&spi->dev, "vdd_io");
+	ks->vdd_phy = regulator_get(&spi->dev, "vdd_phy");
+
+	if (!IS_ERR(ks->vdd_io))
+		regulator_enable(ks->vdd_io);
+
+	if (!IS_ERR(ks->vdd_phy))
+		regulator_enable(ks->vdd_phy);
+
 	ks->netdev = ndev;
 	ks->spidev = spi;
 	ks->tx_space = 6144;
@@ -1686,6 +1697,16 @@
 err_id:
 err_irq:
-	free_netdev(ndev);
+	if (!IS_ERR(ks->vdd_io)) {
+		regulator_disable(ks->vdd_io);
+		regulator_put(ks->vdd_io);
+	}
+
+	if (!IS_ERR(ks->vdd_phy)) {
+		regulator_disable(ks->vdd_phy);
+		regulator_put(ks->vdd_phy);
+	}
+
+	free_netdev(ndev);
 	return ret;
 }
 
@@ -1696,6 +1717,16 @@
 	if (netif_msg_drv(priv))
 		dev_info(&spi->dev, "remove\n");
 
 	unregister_netdev(priv->netdev);
+
+	if (!IS_ERR(priv->vdd_io)) {
+		regulator_disable(priv->vdd_io);
+		regulator_put(priv->vdd_io);
+	}
+
+	if (!IS_ERR(priv->vdd_phy)) {
+		regulator_disable(priv->vdd_phy);
+		regulator_put(priv->vdd_phy);
+	}
 	free_irq(spi->irq, priv);
 	free_netdev(priv->netdev);
diff --git a/drivers/net/msm_rmnet.c b/drivers/net/msm_rmnet.c
new file mode 100644
index 0000000..6889425
--- /dev/null
+++ b/drivers/net/msm_rmnet.c
@@ -0,0 +1,849 @@
+/* linux/drivers/net/msm_rmnet.c
+ *
+ * Virtual Ethernet Interface for MSM7K Networking
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/msm_smd.h>
+#include <mach/peripheral-loader.h>
+
+/* Debug message support */
+static int msm_rmnet_debug_mask;
+module_param_named(debug_enable, msm_rmnet_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			\
+		if (msm_rmnet_debug_mask & m)   \
+			pr_info(x);		\
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+static const char *ch_name[RMNET_DEVICE_COUNT] = {
+	"DATA5",
+	"DATA6",
+	"DATA7",
+	"DATA8",
+	"DATA9",
+	"DATA12",
+	"DATA13",
+	"DATA14",
+};
+
+/* XXX should come from smd headers */
+#define SMD_PORT_ETHER0 11
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define HEADROOM_FOR_QOS    8
+
+static struct completion *port_complete[RMNET_DEVICE_COUNT];
+
+struct rmnet_private
+{
+	smd_channel_t *ch;
+	struct net_device_stats stats;
+	const char *chname;
+	struct wake_lock wake_lock;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	struct sk_buff *skb;
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	u32 operation_mode;    /* IOCTL specified mode (protocol, QoS header) */
+	struct platform_driver pdrv;
+	struct completion complete;
+	void *pil;
+	struct mutex pil_lock;
+};
+
+static uint msm_rmnet_modem_wait;
+module_param_named(modem_wait, msm_rmnet_modem_wait,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off. */
+static ssize_t timeout_suspend_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n)
+{
+	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler) {
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler) {
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p) {
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0) /* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us) {
+		ret = 1;
+	}
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+		char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set timeout in us. */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+		const char *buf, size_t n)
+{
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
+#else
+/* If using early suspend/resume hooks do not write the value on store. */
+	timeout_us = simple_strtoul(buf, NULL, 10);
+#endif
+	return n;
+}
+
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	/* timeout_us is module-global; the per-device private data is unused */
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	(void) p;
+	return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+		       dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
+/* Called in soft-irq context */
+static void smd_net_data_handler(unsigned long arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct rmnet_private *p = netdev_priv(dev);
+	struct sk_buff *skb;
+	void *ptr = 0;
+	int sz;
+	u32 opmode = p->operation_mode;
+	unsigned long flags;
+
+	for (;;) {
+		sz = smd_cur_packet_size(p->ch);
+		if (sz == 0) break;
+		if (smd_read_avail(p->ch) < sz) break;
+
+		if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
+						(sz > (dev->mtu + ETH_HLEN))) {
+			pr_err("[%s] rmnet_recv() discarding packet len %d (%d mtu)\n",
+				dev->name, sz, RMNET_IS_MODE_IP(opmode) ?
+					dev->mtu : (dev->mtu + ETH_HLEN));
+			ptr = 0;
+		} else {
+			skb = dev_alloc_skb(sz + NET_IP_ALIGN);
+			if (skb == NULL) {
+				pr_err("[%s] rmnet_recv() cannot allocate skb\n",
+				       dev->name);
+			} else {
+				skb->dev = dev;
+				skb_reserve(skb, NET_IP_ALIGN);
+				ptr = skb_put(skb, sz);
+				wake_lock_timeout(&p->wake_lock, HZ / 2);
+				if (smd_read(p->ch, ptr, sz) != sz) {
+					pr_err("[%s] rmnet_recv() smd lied about avail?!",
+						dev->name);
+					ptr = 0;
+					dev_kfree_skb_irq(skb);
+				} else {
+					/* Handle Rx frame format */
+					spin_lock_irqsave(&p->lock, flags);
+					opmode = p->operation_mode;
+					spin_unlock_irqrestore(&p->lock, flags);
+
+					if (RMNET_IS_MODE_IP(opmode)) {
+						/* Driver in IP mode */
+						skb->protocol =
+						  rmnet_ip_type_trans(skb, dev);
+					} else {
+						/* Driver in Ethernet mode */
+						skb->protocol =
+						  eth_type_trans(skb, dev);
+					}
+					if (RMNET_IS_MODE_IP(opmode) ||
+					    count_this_packet(ptr, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+						p->wakeups_rcv +=
+							rmnet_cause_wakeup(p);
+#endif
+						p->stats.rx_packets++;
+						p->stats.rx_bytes += skb->len;
+					}
+					DBG1("[%s] Rx packet #%lu len=%d\n",
+						dev->name, p->stats.rx_packets,
+						skb->len);
+
+					/* Deliver to network stack */
+					netif_rx(skb);
+				}
+				continue;
+			}
+		}
+		if (smd_read(p->ch, ptr, sz) != sz)
+			pr_err("[%s] rmnet_recv() smd lied about avail?!",
+				dev->name);
+	}
+}
+
+static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	smd_channel_t *ch = p->ch;
+	int smd_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+	smd_ret = smd_write(ch, skb->data, skb->len);
+	if (smd_ret != skb->len) {
+		pr_err("[%s] %s: smd_write returned error %d",
+			dev->name, __func__, smd_ret);
+		p->stats.tx_errors++;
+		goto xmit_out;
+	}
+
+	if (RMNET_IS_MODE_IP(opmode) ||
+	    count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+xmit_out:
+	/* data xmited, safe to release skb */
+	dev_kfree_skb_irq(skb);
+	return 0;
+}
+
+static void _rmnet_resume_flow(unsigned long param)
+{
+	struct net_device *dev = (struct net_device *)param;
+	struct rmnet_private *p = netdev_priv(dev);
+	struct sk_buff *skb = NULL;
+	unsigned long flags;
+
+	/* xmit and enable the flow only once even if
+	   multiple tasklets were scheduled by smd_net_notify */
+	spin_lock_irqsave(&p->lock, flags);
+	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
+		skb = p->skb;
+		p->skb = NULL;
+		spin_unlock_irqrestore(&p->lock, flags);
+		_rmnet_xmit(skb, dev);
+		netif_wake_queue(dev);
+	} else
+		spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void msm_rmnet_unload_modem(void *pil)
+{
+	if (pil)
+		pil_put(pil);
+}
+
+static void *msm_rmnet_load_modem(struct net_device *dev)
+{
+	void *pil;
+	int rc;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	pil = pil_get("modem");
+	if (IS_ERR(pil))
+		pr_err("[%s] %s: modem load failed\n",
+			dev->name, __func__);
+	else if (msm_rmnet_modem_wait) {
+		rc = wait_for_completion_interruptible_timeout(
+			&p->complete,
+			msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
+		if (!rc)
+			rc = -ETIMEDOUT;
+		if (rc < 0) {
+			pr_err("[%s] %s: wait for rmnet port failed %d\n",
+			       dev->name, __func__, rc);
+			msm_rmnet_unload_modem(pil);
+			pil = ERR_PTR(rc);
+		}
+	}
+
+	return pil;
+}
+
+static void smd_net_notify(void *_dev, unsigned event)
+{
+	struct rmnet_private *p = netdev_priv((struct net_device *)_dev);
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		spin_lock(&p->lock);
+		if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
+			smd_disable_read_intr(p->ch);
+			tasklet_hi_schedule(&p->tsklt);
+		}
+
+		spin_unlock(&p->lock);
+
+		if (smd_read_avail(p->ch) &&
+			(smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
+			smd_net_data_tasklet.data = (unsigned long) _dev;
+			tasklet_schedule(&smd_net_data_tasklet);
+		}
+		break;
+
+	case SMD_EVENT_OPEN:
+		DBG0("%s: opening SMD port\n", __func__);
+		netif_carrier_on(_dev);
+		if (netif_queue_stopped(_dev)) {
+			DBG0("%s: re-starting if queue\n", __func__);
+			netif_wake_queue(_dev);
+		}
+		break;
+
+	case SMD_EVENT_CLOSE:
+		DBG0("%s: closing SMD port\n", __func__);
+		netif_carrier_off(_dev);
+		break;
+	}
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	int r;
+	void *pil;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	mutex_lock(&p->pil_lock);
+	if (!p->pil) {
+		pil = msm_rmnet_load_modem(dev);
+		if (IS_ERR(pil)) {
+			mutex_unlock(&p->pil_lock);
+			return PTR_ERR(pil);
+		}
+		p->pil = pil;
+	}
+	mutex_unlock(&p->pil_lock);
+
+	if (!p->ch) {
+		r = smd_open(p->chname, &p->ch, dev, smd_net_notify);
+
+		if (r < 0)
+			return -ENODEV;
+	}
+
+	smd_disable_read_intr(p->ch);
+	return 0;
+}
+
+static int __rmnet_close(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int rc;
+	unsigned long flags;
+
+	if (p->ch) {
+		rc = smd_close(p->ch);
+		spin_lock_irqsave(&p->lock, flags);
+		p->ch = 0;
+		spin_unlock_irqrestore(&p->lock, flags);
+		return rc;
+	} else
+		return -EBADF;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+static int rmnet_stop(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	netif_stop_queue(dev);
+	tasklet_kill(&p->tsklt);
+
+	/* TODO: unload modem safely,
+	   currently, this causes unnecessary unloads */
+	/*
+	mutex_lock(&p->pil_lock);
+	msm_rmnet_unload_modem(p->pil);
+	p->pil = NULL;
+	mutex_unlock(&p->pil_lock);
+	*/
+
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	smd_channel_t *ch = p->ch;
+	unsigned long flags;
+
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped",
+			dev->name);
+		return 0;
+	}
+
+	spin_lock_irqsave(&p->lock, flags);
+	smd_enable_read_intr(ch);
+	if (smd_write_avail(ch) < skb->len) {
+		netif_stop_queue(dev);
+		p->skb = skb;
+		spin_unlock_irqrestore(&p->lock, flags);
+		return 0;
+	}
+	smd_disable_read_intr(ch);
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	_rmnet_xmit(skb, dev);
+
+	return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open		= rmnet_open,
+	.ndo_stop		= rmnet_stop,
+	.ndo_start_xmit		= rmnet_xmit,
+	.ndo_get_stats		= rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout		= rmnet_tx_timeout,
+	.ndo_do_ioctl		= rmnet_ioctl,
+	.ndo_change_mtu		= rmnet_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open		= rmnet_open,
+	.ndo_stop		= rmnet_stop,
+	.ndo_start_xmit		= rmnet_xmit,
+	.ndo_get_stats		= rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout		= rmnet_tx_timeout,
+	.ndo_do_ioctl		= rmnet_ioctl,
+	.ndo_change_mtu		= rmnet_change_mtu,
+	.ndo_set_mac_address	= 0,
+	.ndo_validate_addr	= 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST|
+						     IFF_MULTICAST);
+
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				(RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled  */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:              /* Open transport port     */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:             /* Close transport port    */
+		rc = __rmnet_close(dev);
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+			dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_QOS;
+
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 10 seconds? */
+}
+
+static int msm_rmnet_smd_probe(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < RMNET_DEVICE_COUNT; i++)
+		if (!strcmp(pdev->name, ch_name[i])) {
+			complete_all(port_complete[i]);
+			break;
+		}
+
+	return 0;
+}
+
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+	pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif
+#endif
+
+	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet%d", rmnet_setup);
+
+		if (!dev)
+			return -ENOMEM;
+
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		p->chname = ch_name[n];
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->skb = NULL;
+		spin_lock_init(&p->lock);
+		tasklet_init(&p->tsklt, _rmnet_resume_flow,
+				(unsigned long)dev);
+		wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		init_completion(&p->complete);
+		port_complete[n] = &p->complete;
+		mutex_init(&p->pil_lock);
+		p->pdrv.probe = msm_rmnet_smd_probe;
+		p->pdrv.driver.name = ch_name[n];
+		p->pdrv.driver.owner = THIS_MODULE;
+		ret = platform_driver_register(&p->pdrv);
+		if (ret) {
+			free_netdev(dev);
+			return ret;
+		}
+
+		ret = register_netdev(dev);
+		if (ret) {
+			platform_driver_unregister(&p->pdrv);
+			free_netdev(dev);
+			return ret;
+		}
+
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif
+#endif
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
diff --git a/drivers/net/msm_rmnet_bam.c b/drivers/net/msm_rmnet_bam.c
new file mode 100644
index 0000000..a8bdeb3
--- /dev/null
+++ b/drivers/net/msm_rmnet_bam.c
@@ -0,0 +1,653 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET BAM Module.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/bam_dmux.h>
+
+/* Debug message support */
+static int msm_rmnet_bam_debug_mask;
+module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			   \
+		if (msm_rmnet_bam_debug_mask & m) \
+			pr_info(x);		   \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID   -1
+
+#define DEVICE_INACTIVE      0
+#define DEVICE_ACTIVE        1
+
+#define HEADROOM_FOR_BAM   8 /* for mux header */
+#define HEADROOM_FOR_QOS    8
+#define TAILROOM            8 /* for padding by mux layer */
+
+struct rmnet_private {
+	struct net_device_stats stats;
+	uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	struct sk_buff *skb;
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+	uint8_t device_up;
+};
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us used while the screen is off.
+ *
+ * Fix: strict_strtoul() returns 0/-errno and stores the parsed value
+ * through its third argument; the previous code passed a NULL base and
+ * used the (error-code) return value as the conversion result.
+ */
+static ssize_t timeout_suspend_store(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t n)
+{
+	if (strict_strtoul(buf, 10, &timeout_suspend_us))
+		return -EINVAL;
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+		   timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0) /* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	/* A "wakeup" is counted when the gap since the previous packet
+	 * exceeds the configured timeout. */
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+		ret = 1;
+
+	/* Remember this packet's time for the next comparison. */
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set wakeup-accounting timeout in us.
+ *
+ * Fix: strict_strtoul() returns 0/-errno and stores the parsed value
+ * through its third argument; the previous code passed a NULL base and
+ * used the (error-code) return value as the conversion result.
+ */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	{
+		struct rmnet_private *p = netdev_priv(to_net_dev(d));
+
+		p->timeout_us = timeout_us = val;
+	}
+#else
+	/* If using early suspend/resume hooks do not write the value
+	 * to the per-device timeout on store. */
+	timeout_us = val;
+#endif
+	return n;
+}
+
+/* Show the module-global wakeup-accounting timeout (us).
+ *
+ * Fix: the original computed netdev_priv() twice into a local that was
+ * never used; only the global timeout_us is reported.
+ */
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+/* Derive skb->protocol for raw-IP frames from the IP version nibble
+ * of the first payload byte.  Returns 0 for unrecognized versions;
+ * such skbs are dropped by the upper layer. */
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:	/* IPv4 */
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:	/* IPv6 */
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+		       dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
+/* Returns 0 for Ethernet ARP frames (excluded from rx/tx statistics),
+ * 1 for everything else.  _hdr must point at least ETH_HLEN valid
+ * bytes when len >= ETH_HLEN. */
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+/* Rx Callback, Called in Work Queue context */
+static void bam_recv_notify(void *dev, struct sk_buff *skb)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	unsigned long flags;
+	u32 opmode;
+
+	if (skb) {
+		skb->dev = dev;
+		/* Handle Rx frame format: snapshot the operation mode
+		 * under the lock since an ioctl may change it. */
+		spin_lock_irqsave(&p->lock, flags);
+		opmode = p->operation_mode;
+		spin_unlock_irqrestore(&p->lock, flags);
+
+		if (RMNET_IS_MODE_IP(opmode)) {
+			/* Driver in IP mode */
+			skb->protocol = rmnet_ip_type_trans(skb, dev);
+		} else {
+			/* Driver in Ethernet mode */
+			skb->protocol = eth_type_trans(skb, dev);
+		}
+		/* ARP frames are not counted in Ethernet mode */
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+			p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+			p->stats.rx_packets++;
+			p->stats.rx_bytes += skb->len;
+		}
+		DBG1("[%s] Rx packet #%lu len=%d\n",
+			((struct net_device *)dev)->name,
+			p->stats.rx_packets, skb->len);
+
+		/* Deliver to network stack */
+		netif_rx(skb);
+	} else
+		pr_err("[%s] %s: No skb received",
+			((struct net_device *)dev)->name, __func__);
+}
+
+/* Transmit one skb over the BAM mux channel.
+ *
+ * In QoS mode a QMI QOS header (flow id taken from skb->mark) is
+ * prepended before handing the skb to the BAM DMUX layer.  Ownership
+ * of the skb passes to the mux layer on success; on write failure the
+ * skb is freed here and accounted as a tx error (matching the SDIO
+ * transport).  Always returns 0.
+ */
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int bam_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+	bam_ret = msm_bam_dmux_write(p->ch_id, skb);
+
+	if (bam_ret != 0) {
+		pr_err("[%s] %s: write returned error %d",
+			dev->name, __func__, bam_ret);
+		goto xmit_out;
+	}
+
+	if (count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+	return 0;
+xmit_out:
+	/* write failed: drop the skb and count the error */
+	dev_kfree_skb_any(skb);
+	p->stats.tx_errors++;
+	return 0;
+}
+
+static void bam_write_done(void *dev, struct sk_buff *skb)
+{
+	DBG1("%s: write complete\n", __func__);
+	dev_kfree_skb_any(skb);
+	netif_wake_queue(dev);
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	int r;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] __rmnet_open()\n", dev->name);
+
+	if (!p->device_up) {
+		r = msm_bam_dmux_open(p->ch_id, dev,
+				       bam_recv_notify, bam_write_done);
+
+		if (r < 0)
+			return -ENODEV;
+	}
+
+	p->device_up = DEVICE_ACTIVE;
+	return 0;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+
+static int __rmnet_close(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int rc = 0;
+
+	if (p->device_up) {
+		/* do not close rmnet port once up,  this causes
+		   remote side to hang if tried to open again */
+		p->device_up = DEVICE_INACTIVE;
+		return rc;
+	} else
+		return -EBADF;
+}
+
+
+static int rmnet_stop(struct net_device *dev)
+{
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	__rmnet_close(dev);
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s]fatal: rmnet_xmit called when "
+			"netif_queue is stopped", dev->name);
+		return 0;
+	}
+
+	netif_stop_queue(dev);
+	_rmnet_xmit(skb, dev);
+
+	return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/* Private ioctl handler: switches the link-layer protocol between
+ * Ethernet and raw IP, toggles the QMI QoS header, reports current
+ * mode, and opens/closes the transport port.  Mode words are updated
+ * under p->lock; the surrounding reconfiguration (ether_setup(),
+ * netdev_ops swap) is not locked — NOTE(review): presumably callers
+ * serialize via rtnl, confirm. */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			/* ether_setup() resets the MTU; restore it */
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST|
+						     IFF_MULTICAST);
+
+			/* room for the mux header and optional QoS header */
+			dev->needed_headroom = HEADROOM_FOR_BAM +
+			  HEADROOM_FOR_QOS;
+			dev->needed_tailroom = TAILROOM;
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled  */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:              /* Open transport port     */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:             /* Close transport port    */
+		rc = __rmnet_close(dev);
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+			dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS ;
+	dev->needed_tailroom = TAILROOM;
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 10 seconds? */
+}
+
+
+/* Module init: allocate and register RMNET_DEVICE_COUNT net devices
+ * ("rmnet0".."rmnet7"), each mapped to BAM channel n, and create the
+ * debug sysfs attributes.  Returns 0 on success or a negative errno
+ * from the first failed allocation/registration (devices registered
+ * earlier in the loop are left in place). */
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif
+#endif
+
+	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet%d", rmnet_setup);
+
+		if (!dev)
+			return -ENOMEM;
+
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->ch_id = n;
+		spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		ret = register_netdev(dev);
+		if (ret) {
+			free_netdev(dev);
+			return ret;
+		}
+
+		/* NOTE(review): a device_create_file() failure skips the
+		 * remaining attributes (and the rmnet0 hook below) for
+		 * this device but is otherwise ignored. */
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif
+#endif
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/net/msm_rmnet_sdio.c b/drivers/net/msm_rmnet_sdio.c
new file mode 100644
index 0000000..883c649
--- /dev/null
+++ b/drivers/net/msm_rmnet_sdio.c
@@ -0,0 +1,704 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * RMNET SDIO Module.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wakelock.h>
+#include <linux/if_arp.h>
+#include <linux/msm_rmnet.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <mach/sdio_dmux.h>
+
+/* Debug message support */
+static int msm_rmnet_sdio_debug_mask;
+module_param_named(debug_enable, msm_rmnet_sdio_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DEBUG_MASK_LVL0 (1U << 0)
+#define DEBUG_MASK_LVL1 (1U << 1)
+#define DEBUG_MASK_LVL2 (1U << 2)
+
+#define DBG(m, x...) do {			   \
+		if (msm_rmnet_sdio_debug_mask & m) \
+			pr_info(x);		   \
+} while (0)
+#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
+#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
+#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
+
+/* Configure device instances */
+#define RMNET_DEVICE_COUNT (8)
+
+/* allow larger frames */
+#define RMNET_DATA_LEN 2000
+
+#define DEVICE_ID_INVALID   -1
+
+#define DEVICE_INACTIVE      0
+#define DEVICE_ACTIVE        1
+
+#define HEADROOM_FOR_SDIO   8 /* for mux header */
+#define HEADROOM_FOR_QOS    8
+#define TAILROOM            8 /* for padding by mux layer */
+
+struct rmnet_private {
+	struct net_device_stats stats;
+	uint32_t ch_id;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+#endif
+	struct sk_buff *skb;
+	spinlock_t lock;
+	struct tasklet_struct tsklt;
+	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
+	uint8_t device_up;
+	uint8_t in_reset;
+};
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static unsigned long timeout_us;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * screen on (default), and screen is off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us used while the screen is off.
+ *
+ * Fix: strict_strtoul() returns 0/-errno and stores the parsed value
+ * through its third argument; the previous code passed a NULL base and
+ * used the (error-code) return value as the conversion result.
+ */
+static ssize_t timeout_suspend_store(struct device *d,
+				     struct device_attribute *attr,
+				     const char *buf, size_t n)
+{
+	if (strict_strtoul(buf, 10, &timeout_suspend_us))
+		return -EINVAL;
+	return n;
+}
+
+static ssize_t timeout_suspend_show(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
+}
+
+static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
+		   timeout_suspend_store);
+
+static void rmnet_early_suspend(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_suspend_us;
+	}
+}
+
+static void rmnet_late_resume(struct early_suspend *handler)
+{
+	if (rmnet0) {
+		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
+		p->timeout_us = timeout_us;
+	}
+}
+
+static struct early_suspend rmnet_power_suspend = {
+	.suspend = rmnet_early_suspend,
+	.resume = rmnet_late_resume,
+};
+
+static int __init rmnet_late_init(void)
+{
+	register_early_suspend(&rmnet_power_suspend);
+	return 0;
+}
+
+late_initcall(rmnet_late_init);
+#endif
+
+/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
+static int rmnet_cause_wakeup(struct rmnet_private *p)
+{
+	int ret = 0;
+	ktime_t now;
+	if (p->timeout_us == 0) /* Check if disabled */
+		return 0;
+
+	/* Use real (wall) time. */
+	now = ktime_get_real();
+
+	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
+		ret = 1;
+
+	p->last_packet = now;
+	return ret;
+}
+
+static ssize_t wakeups_xmit_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_xmit);
+}
+
+DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
+
+static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct rmnet_private *p = netdev_priv(to_net_dev(d));
+	return sprintf(buf, "%lu\n", p->wakeups_rcv);
+}
+
+DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
+
+/* Set wakeup-accounting timeout in us.
+ *
+ * Fix: strict_strtoul() returns 0/-errno and stores the parsed value
+ * through its third argument; the previous code passed a NULL base and
+ * used the (error-code) return value as the conversion result.
+ */
+static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+	{
+		struct rmnet_private *p = netdev_priv(to_net_dev(d));
+
+		p->timeout_us = timeout_us = val;
+	}
+#else
+	/* If using early suspend/resume hooks do not write the value
+	 * to the per-device timeout on store. */
+	timeout_us = val;
+#endif
+	return n;
+}
+
+/* Show the module-global wakeup-accounting timeout (us).
+ *
+ * Fix: the original computed netdev_priv() twice into a local that was
+ * never used; only the global timeout_us is reported.
+ */
+static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	return sprintf(buf, "%lu\n", timeout_us);
+}
+
+DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
+#endif
+
+
+/* Forward declaration */
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = 0;
+
+	skb->dev = dev;
+
+	/* Determine L3 protocol */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
+		       dev->name, skb->data[0] & 0xf0);
+		/* skb will be dropped in upper layer for unknown protocol */
+	}
+	return protocol;
+}
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+/* Poll the SDIO mux for the channel's reset state and mirror it onto
+ * the netdev carrier.  Returns 1 if the state changed, 0 otherwise.
+ * Called with p->lock held by sdio_recv_notify(). */
+static int sdio_update_reset_state(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int	new_state;
+
+	new_state = msm_sdio_is_channel_in_reset(p->ch_id);
+
+	if (p->in_reset != new_state) {
+		p->in_reset = (uint8_t)new_state;
+
+		/* carrier off while the channel is in reset */
+		if (p->in_reset)
+			netif_carrier_off(dev);
+		else
+			netif_carrier_on(dev);
+		return 1;
+	}
+	return 0;
+}
+
+/* Rx Callback, Called in Work Queue context.  A NULL skb is the mux
+ * layer's signal to re-check the channel reset state. */
+static void sdio_recv_notify(void *dev, struct sk_buff *skb)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	unsigned long flags;
+	u32 opmode;
+
+	if (skb) {
+		skb->dev = dev;
+		/* Handle Rx frame format: snapshot the operation mode
+		 * under the lock since an ioctl may change it. */
+		spin_lock_irqsave(&p->lock, flags);
+		opmode = p->operation_mode;
+		spin_unlock_irqrestore(&p->lock, flags);
+
+		if (RMNET_IS_MODE_IP(opmode)) {
+			/* Driver in IP mode */
+			skb->protocol = rmnet_ip_type_trans(skb, dev);
+		} else {
+			/* Driver in Ethernet mode */
+			skb->protocol = eth_type_trans(skb, dev);
+		}
+		/* ARP frames are not counted in Ethernet mode */
+		if (RMNET_IS_MODE_IP(opmode) ||
+		    count_this_packet(skb->data, skb->len)) {
+#ifdef CONFIG_MSM_RMNET_DEBUG
+			p->wakeups_rcv += rmnet_cause_wakeup(p);
+#endif
+			p->stats.rx_packets++;
+			p->stats.rx_bytes += skb->len;
+		}
+		DBG1("[%s] Rx packet #%lu len=%d\n",
+			((struct net_device *)dev)->name,
+			p->stats.rx_packets, skb->len);
+
+		/* Deliver to network stack */
+		netif_rx(skb);
+	} else {
+		/* NULL skb: either a reset-state transition or a
+		 * genuinely empty notification worth logging */
+		spin_lock_irqsave(&p->lock, flags);
+		if (!sdio_update_reset_state((struct net_device *)dev))
+			pr_err("[%s] %s: No skb received",
+				((struct net_device *)dev)->name, __func__);
+		spin_unlock_irqrestore(&p->lock, flags);
+	}
+}
+
+static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int sdio_ret;
+	struct QMI_QOS_HDR_S *qmih;
+	u32 opmode;
+	unsigned long flags;
+
+	if (!netif_carrier_ok(dev)) {
+		pr_err("[%s] %s: channel in reset",
+			dev->name, __func__);
+		goto xmit_out;
+	}
+
+	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
+	spin_lock_irqsave(&p->lock, flags);
+	opmode = p->operation_mode;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (RMNET_IS_MODE_QOS(opmode)) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	}
+
+	dev->trans_start = jiffies;
+	sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);
+
+	if (sdio_ret != 0) {
+		pr_err("[%s] %s: write returned error %d",
+			dev->name, __func__, sdio_ret);
+		goto xmit_out;
+	}
+
+	if (count_this_packet(skb->data, skb->len)) {
+		p->stats.tx_packets++;
+		p->stats.tx_bytes += skb->len;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->wakeups_xmit += rmnet_cause_wakeup(p);
+#endif
+	}
+	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
+	    dev->name, p->stats.tx_packets, skb->len, skb->mark);
+
+	return 0;
+xmit_out:
+	dev_kfree_skb_any(skb);
+	p->stats.tx_errors++;
+	return 0;
+}
+
+/* Tx-complete callback from the SDIO mux: free the skb (may be NULL)
+ * and restart the queue once the channel drains below its low
+ * watermark, unless the channel is in reset. */
+static void sdio_write_done(void *dev, struct sk_buff *skb)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+
+	if (!p->in_reset) {
+		DBG1("%s: write complete skb=%p\n",	__func__, skb);
+
+		/* wake the queue only at the low watermark to give the
+		 * channel hysteresis (stopped at the high watermark in
+		 * rmnet_xmit()) */
+		if (netif_queue_stopped(dev) &&
+				msm_sdio_dmux_is_ch_low(p->ch_id)) {
+			DBG0("%s: Low WM hit, waking queue=%p\n",
+					__func__, skb);
+			netif_wake_queue(dev);
+		}
+	} else {
+		DBG1("%s: write in reset skb=%p\n",	__func__, skb);
+	}
+}
+
+static int __rmnet_open(struct net_device *dev)
+{
+	int r;
+	struct rmnet_private *p = netdev_priv(dev);
+
+	DBG0("[%s] __rmnet_open()\n", dev->name);
+
+	if (!p->device_up) {
+		r = msm_sdio_dmux_open(p->ch_id, dev,
+				       sdio_recv_notify, sdio_write_done);
+
+		if (r < 0)
+			return -ENODEV;
+	}
+
+	p->device_up = DEVICE_ACTIVE;
+	return 0;
+}
+
+static int rmnet_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	DBG0("[%s] rmnet_open()\n", dev->name);
+
+	rc = __rmnet_open(dev);
+
+	if (rc == 0)
+		netif_start_queue(dev);
+
+	return rc;
+}
+
+
+static int __rmnet_close(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	int rc = 0;
+
+	if (p->device_up) {
+		/* do not close rmnet port once up,  this causes
+		   remote side to hang if tried to open again */
+		/* rc = msm_sdio_dmux_close(p->ch_id); */
+		p->device_up = DEVICE_INACTIVE;
+		return rc;
+	} else
+		return -EBADF;
+}
+
+
+static int rmnet_stop(struct net_device *dev)
+{
+	DBG0("[%s] rmnet_stop()\n", dev->name);
+
+	__rmnet_close(dev);
+	netif_stop_queue(dev);
+
+	return 0;
+}
+
+static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
+		return -EINVAL;
+
+	DBG0("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+/* ndo_start_xmit: hand the skb to the SDIO mux and apply
+ * high-watermark flow control. */
+static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+
+	if (netif_queue_stopped(dev)) {
+		pr_err("[%s]fatal: rmnet_xmit called when "
+			"netif_queue is stopped", dev->name);
+		return 0;
+	}
+
+	_rmnet_xmit(skb, dev);
+
+	/* stop the queue when the channel crosses its high watermark;
+	 * sdio_write_done() restarts it at the low watermark */
+	if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
+		netif_stop_queue(dev);
+		DBG0("%s: High WM hit, stopping queue=%p\n",	__func__, skb);
+	}
+
+	return 0;
+}
+
+static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	return &p->stats;
+}
+
+static void rmnet_set_multicast_list(struct net_device *dev)
+{
+}
+
+static void rmnet_tx_timeout(struct net_device *dev)
+{
+	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
+}
+
+static const struct net_device_ops rmnet_ops_ether = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static const struct net_device_ops rmnet_ops_ip = {
+	.ndo_open = rmnet_open,
+	.ndo_stop = rmnet_stop,
+	.ndo_start_xmit = rmnet_xmit,
+	.ndo_get_stats = rmnet_get_stats,
+	.ndo_set_multicast_list = rmnet_set_multicast_list,
+	.ndo_tx_timeout = rmnet_tx_timeout,
+	.ndo_do_ioctl = rmnet_ioctl,
+	.ndo_change_mtu = rmnet_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_private *p = netdev_priv(dev);
+	u32 old_opmode = p->operation_mode;
+	unsigned long flags;
+	int prev_mtu = dev->mtu;
+	int rc = 0;
+
+	/* Process IOCTL command */
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_ETHERNET:  /* Set Ethernet protocol   */
+		/* Perform Ethernet config only if in IP mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_IP) {
+			ether_setup(dev);
+			random_ether_addr(dev->dev_addr);
+			dev->mtu = prev_mtu;
+
+			dev->netdev_ops = &rmnet_ops_ether;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_IP;
+			p->operation_mode |= RMNET_MODE_LLP_ETH;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set Ethernet protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP:        /* Set RAWIP protocol      */
+		/* Perform IP config only if in Ethernet mode currently*/
+		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
+
+			/* Undo config done in ether_setup() */
+			dev->header_ops         = 0;  /* No header */
+			dev->type               = ARPHRD_RAWIP;
+			dev->hard_header_len    = 0;
+			dev->mtu                = prev_mtu;
+			dev->addr_len           = 0;
+			dev->flags              &= ~(IFF_BROADCAST|
+						     IFF_MULTICAST);
+
+			dev->needed_headroom = HEADROOM_FOR_SDIO +
+			  HEADROOM_FOR_QOS;
+			dev->needed_tailroom = TAILROOM;
+			dev->netdev_ops = &rmnet_ops_ip;
+			spin_lock_irqsave(&p->lock, flags);
+			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
+			p->operation_mode |= RMNET_MODE_LLP_IP;
+			spin_unlock_irqrestore(&p->lock, flags);
+			DBG0("[%s] rmnet_ioctl(): "
+				"set IP protocol mode\n",
+				dev->name);
+		}
+		break;
+
+	case RMNET_IOCTL_GET_LLP:           /* Get link protocol state */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode &
+				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
+		break;
+
+	case RMNET_IOCTL_SET_QOS_ENABLE:    /* Set QoS header enabled  */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode |= RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:   /* Set QoS header disabled */
+		spin_lock_irqsave(&p->lock, flags);
+		p->operation_mode &= ~RMNET_MODE_QOS;
+		spin_unlock_irqrestore(&p->lock, flags);
+		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		ifr->ifr_ifru.ifru_data =
+			(void *)(p->operation_mode & RMNET_MODE_QOS);
+		break;
+
+	case RMNET_IOCTL_GET_OPMODE:        /* Get operation mode      */
+		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
+		break;
+
+	case RMNET_IOCTL_OPEN:              /* Open transport port     */
+		rc = __rmnet_open(dev);
+		DBG0("[%s] rmnet_ioctl(): open transport port\n",
+			dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE:             /* Close transport port    */
+		rc = __rmnet_close(dev);
+		DBG0("[%s] rmnet_ioctl(): close transport port\n",
+			dev->name);
+		break;
+
+	default:
+		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
+			dev->name, cmd);
+		return -EINVAL;
+	}
+
+	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
+		dev->name, __func__, cmd, old_opmode, p->operation_mode);
+	return rc;
+}
+
+static void __init rmnet_setup(struct net_device *dev)
+{
+	/* Using Ethernet mode by default */
+	dev->netdev_ops = &rmnet_ops_ether;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->mtu = RMNET_DATA_LEN;
+	dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS ;
+	dev->needed_tailroom = TAILROOM;
+	random_ether_addr(dev->dev_addr);
+
+	dev->watchdog_timeo = 1000; /* 10 seconds? */
+}
+
+
+static int __init rmnet_init(void)
+{
+	int ret;
+	struct device *d;
+	struct net_device *dev;
+	struct rmnet_private *p;
+	unsigned n;
+
+	pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	timeout_us = 0;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	timeout_suspend_us = 0;
+#endif
+#endif
+
+	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
+		dev = alloc_netdev(sizeof(struct rmnet_private),
+				   "rmnet_sdio%d", rmnet_setup);
+
+		if (!dev)
+			return -ENOMEM;
+
+		d = &(dev->dev);
+		p = netdev_priv(dev);
+		/* Initial config uses Ethernet */
+		p->operation_mode = RMNET_MODE_LLP_ETH;
+		p->ch_id = n;
+		spin_lock_init(&p->lock);
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		p->timeout_us = timeout_us;
+		p->wakeups_xmit = p->wakeups_rcv = 0;
+#endif
+
+		ret = register_netdev(dev);
+		if (ret) {
+			free_netdev(dev);
+			return ret;
+		}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+		if (device_create_file(d, &dev_attr_timeout))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_xmit))
+			continue;
+		if (device_create_file(d, &dev_attr_wakeups_rcv))
+			continue;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (device_create_file(d, &dev_attr_timeout_suspend))
+			continue;
+
+		/* Only care about rmnet0 for suspend/resume timeout hooks. */
+		if (n == 0)
+			rmnet0 = d;
+#endif
+#endif
+	}
+	return 0;
+}
+
+module_init(rmnet_init);
+MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/net/qfec.c b/drivers/net/qfec.c
new file mode 100644
index 0000000..90e8eff
--- /dev/null
+++ b/drivers/net/qfec.c
@@ -0,0 +1,2521 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+
+#include <linux/platform_device.h>
+
+#include <linux/types.h>        /* size_t */
+#include <linux/interrupt.h>    /* mark_bh */
+
+#include <linux/netdevice.h>   /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#include <linux/mii.h>
+
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/inet.h>
+
+#include "qfec.h"
+
+#define QFEC_NAME       "qfec"
+#define QFEC_DRV_VER    "June 18a 2011"
+
+#define ETH_BUF_SIZE    0x600
+#define MAX_N_BD        50
+#define MAC_ADDR_SIZE	6
+
+#define RX_TX_BD_RATIO  8
+#define RX_BD_NUM       32
+#define TX_BD_NUM       (RX_BD_NUM * RX_TX_BD_RATIO)
+#define TX_BD_TI_RATIO  4
+
+/*
+ * logging macros
+ */
+#define QFEC_LOG_PR     1
+#define QFEC_LOG_DBG    2
+#define QFEC_LOG_DBG2   4
+#define QFEC_LOG_MDIO_W 8
+#define QFEC_LOG_MDIO_R 16
+
+static int qfec_debug = QFEC_LOG_PR;
+
+#ifdef QFEC_DEBUG
+# define QFEC_LOG(flag, ...)                    \
+	do {                                    \
+		if (flag & qfec_debug)          \
+			pr_info(__VA_ARGS__);  \
+	} while (0)
+#else
+# define QFEC_LOG(flag, ...)
+#endif
+
+#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__)
+
/*
 * driver buffer-descriptor
 *   contains the 4 word HW descriptor plus an additional 4-words.
 *   (See the DSL bits in the BUS-Mode register).
 */
#define BD_FLAG_LAST_BD     1

/* software shadow of one HW descriptor: tracks the skb and its buffer
 * addresses, plus a flag marking the final descriptor of a ring
 */
struct buf_desc {
	struct qfec_buf_desc   *p_desc;        /* the 4-word HW descriptor */
	struct sk_buff         *skb;           /* skb whose data is mapped */
	void                   *buf_virt_addr; /* kernel virtual buffer addr */
	void                   *buf_phys_addr; /* DMA/physical buffer addr */
	uint32_t                last_bd_flag;  /* non-zero on last BD of ring */
};
+
/*
 *inline functions accessing non-struct qfec_buf_desc elements
 * (i.e. the software-only shadow fields of struct buf_desc)
 */

/* skb associated with this BD */
static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd)
{
	return p_bd->skb;
};

static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p)
{
	p_bd->skb   = p;
};

/* virtual addr of the BD's data buffer */
static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr)
{
	p_bd->buf_virt_addr = addr;
};

static inline void *qfec_bd_virt_get(struct buf_desc *p_bd)
{
	return p_bd->buf_virt_addr;
};

/* physical/DMA addr of the BD's data buffer */
static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr)
{
	p_bd->buf_phys_addr = addr;
};

static inline void *qfec_bd_phys_get(struct buf_desc *p_bd)
{
	return p_bd->buf_phys_addr;
};

/* last_bd_flag: non-zero when this BD terminates its ring */
static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd)
{
	return (p_bd->last_bd_flag != 0);
};

static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd)
{
	p_bd->last_bd_flag = BD_FLAG_LAST_BD;
};
+
/*
 *inline functions accessing struct qfec_buf_desc elements
 * (the HW-visible descriptor words; layout and bit masks from qfec.h)
 */

/* ownership bit: set means the HW (DMA engine) owns the descriptor */
static inline uint32_t qfec_bd_own(struct buf_desc *p_bd)
{
	return p_bd->p_desc->status & BUF_OWN;
};

static inline void qfec_bd_own_set(struct buf_desc *p_bd)
{
	p_bd->p_desc->status |= BUF_OWN ;
};

static inline void qfec_bd_own_clr(struct buf_desc *p_bd)
{
	p_bd->p_desc->status &= ~(BUF_OWN);
};

static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->status;
};

static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status)
{
	p_bd->p_desc->status = status;
};

/* extract the received-frame length field from the status word */
static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd)
{
	return BUF_RX_FL_GET((*p_bd->p_desc));
};

/* control register: reset / read / OR-in bits / overwrite */
static inline void qfec_bd_ctl_reset(struct buf_desc *p_bd)
{
	p_bd->p_desc->ctl  = 0;
};

static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->ctl;
};

static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val)
{
	p_bd->p_desc->ctl |= val;
};

static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val)
{
	p_bd->p_desc->ctl = val;
};

/* pbuf register: DMA address of the data buffer */
static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->p_buf;
}

static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p)
{
	p_bd->p_desc->p_buf = p;
}

/* next register (chained-descriptor pointer) */
static inline void *qfec_bd_next_get(struct buf_desc *p_bd)
{
	return p_bd->p_desc->next;
};
+
/*
 * initialize an RX BD w/ a new buf
 *
 * allocates an skb, maps its data for DMA-from-device, points the HW
 * descriptor at the mapping, then hands ownership of the BD to the HW.
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd)
{
	struct sk_buff     *skb;
	void               *p;	/* DMA address of skb data */
	void               *v;	/* virtual address of skb data */

	/* allocate and record ptrs for sk buff */
	skb   = dev_alloc_skb(ETH_BUF_SIZE);
	if (!skb)
		goto err;

	qfec_bd_skbuf_set(p_bd, skb);

	v = skb_put(skb, ETH_BUF_SIZE);
	qfec_bd_virt_set(p_bd, v);

	/* NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error() -- confirm mapping cannot fail on this platform
	 */
	p = (void *) dma_map_single(&dev->dev,
		(void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE);
	qfec_bd_pbuf_set(p_bd, p);
	qfec_bd_phys_set(p_bd, p);

	/* populate control register */
	/* mark the last BD and set end-of-ring bit */
	qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE |
		(qfec_bd_last_bd(p_bd) ? BUF_RX_RER : 0));

	/* hand the descriptor to the DMA engine last */
	qfec_bd_status_set(p_bd, BUF_OWN);

	if (!(qfec_debug & QFEC_LOG_DBG2))
		return 0;

	/* debug messages */
	QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd);

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb);

	QFEC_LOG(QFEC_LOG_DBG2,
		"%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n",
		__func__, (void *)p_bd,
		(void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */
		(void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd),
		(void *)p);

	return 0;

err:
	return -ENOMEM;
};
+
/*
 * ring structure used to maintain indices of buffer-descriptor (BD) usage
 *
 *   The RX BDs are normally all pre-allocated with buffers available to be
 *   DMA'd into with received frames.  The head indicates the first BD/buffer
 *   containing a received frame, and the tail indicates the oldest BD/buffer
 *   that needs to be restored for use.   Head and tail are both initialized
 *   to zero, and n_free is initialized to zero, since all BD are initialized.
 *
 *   The TX BDs are normally available for use, only being initialized as
 *   TX frames are requested for transmission.   The head indicates the
 *   first available BD, and the tail indicate the oldest BD that has
 *   not been acknowledged as transmitted.    Head and tail are both
 *   initialized to zero, and n_free is initialized to len, since all are
 *   available for use.
 */
struct ring {
	int     head;		/* next index to produce into */
	int     tail;		/* oldest index to consume from */
	int     n_free;		/* BDs currently available */
	int     len;		/* total BDs in the ring */
};

/* accessory in line functions for struct ring */

/* initialize a ring of 'size' entries with 'free' of them available */
static inline void qfec_ring_init(struct ring *p_ring, int size, int free)
{
	p_ring->head  = p_ring->tail = 0;
	p_ring->len   = size;
	p_ring->n_free = free;
}

static inline int qfec_ring_full(struct ring *p_ring)
{
	return (p_ring->n_free == 0);
}

static inline int qfec_ring_empty(struct ring *p_ring)
{
	return (p_ring->n_free == p_ring->len);
}

/* advance head (produce one entry), wrapping at ring length
 *
 * fixed: the original 'p_ring->head = ++p_ring->head % p_ring->len'
 * modifies head twice without a sequence point, which is undefined
 * behavior in C (C11 6.5p2)
 */
static inline void qfec_ring_head_adv(struct ring *p_ring)
{
	p_ring->head = (p_ring->head + 1) % p_ring->len;
	p_ring->n_free--;
}

/* advance tail (consume one entry), wrapping at ring length; same UB fix */
static inline void qfec_ring_tail_adv(struct ring *p_ring)
{
	p_ring->tail = (p_ring->tail + 1) % p_ring->len;
	p_ring->n_free++;
}

static inline int qfec_ring_head(struct ring *p_ring)
{
	return p_ring->head;
}

static inline int qfec_ring_tail(struct ring *p_ring)
{
	return p_ring->tail;
}

static inline int qfec_ring_room(struct ring *p_ring)
{
	return p_ring->n_free;
}
+
/*
 * counters track normal and abnormal driver events and activity
 *
 * NOTE: order must match cntr_name[] below; the "half" marker splits
 * the list into the two columns printed by qfec_cntrs_show()
 */
enum cntr {
	isr                  =  0,
	fatal_bus,

	early_tx,
	tx_no_resource,
	tx_proc_stopped,
	tx_jabber_tmout,

	xmit,
	tx_int,
	tx_isr,
	tx_owned,
	tx_underflow,

	tx_replenish,
	tx_skb_null,
	tx_timeout,
	tx_too_large,

	gmac_isr,

	/* half */
	norm_int,
	abnorm_int,

	early_rx,
	rx_buf_unavail,
	rx_proc_stopped,
	rx_watchdog,

	netif_rx_cntr,
	rx_int,
	rx_isr,
	rx_owned,
	rx_overflow,

	rx_dropped,
	rx_skb_null,
	queue_start,
	queue_stop,

	rx_paddr_nok,
	ts_ioctl,
	ts_tx_en,
	ts_tx_rtn,

	ts_rec,
	cntr_last,	/* sentinel: number of counters */
};
+
/* display names for enum cntr; must stay in sync with the enum order.
 * The trailing "" entry pads the table so qfec_cntrs_show() can print
 * two columns safely.
 */
static char *cntr_name[]  = {
	"isr",
	"fatal_bus",

	"early_tx",
	"tx_no_resource",
	"tx_proc_stopped",
	"tx_jabber_tmout",

	"xmit",
	"tx_int",
	"tx_isr",
	"tx_owned",
	"tx_underflow",

	"tx_replenish",
	"tx_skb_null",
	"tx_timeout",
	"tx_too_large",

	"gmac_isr",

	/* half */
	"norm_int",
	"abnorm_int",

	"early_rx",
	"rx_buf_unavail",
	"rx_proc_stopped",
	"rx_watchdog",

	"netif_rx",
	"rx_int",
	"rx_isr",
	"rx_owned",
	"rx_overflow",

	"rx_dropped",
	"rx_skb_null",
	"queue_start",
	"queue_stop",

	"rx_paddr_nok",
	"ts_ioctl",
	"ts_tx_en",
	"ts_tx_rtn",

	"ts_rec",
	""
};
+
/*
 * private data
 */

/* module-level handle to the qfec net_device -- presumably assigned
 * during probe elsewhere in this file
 */
static struct net_device  *qfec_dev;

/* bit flags kept in qfec_priv.state */
enum qfec_state {
	timestamping  = 0x04,	/* HW timestamping enabled */
};

/* per-device driver state, stored as netdev_priv() of the net_device */
struct qfec_priv {
	struct net_device      *net_dev;
	struct net_device_stats stats;            /* req statistics */

	struct device           dev;

	spinlock_t              xmit_lock;
	spinlock_t              mdio_lock;

	unsigned int            state;            /* driver state */

	unsigned int            bd_size;          /* buf-desc alloc size */
	struct qfec_buf_desc   *bd_base;          /* * qfec-buf-desc */
	dma_addr_t              tbd_dma;          /* dma/phy-addr buf-desc */
	dma_addr_t              rbd_dma;          /* dma/phy-addr buf-desc */

	struct resource        *mac_res;
	void                   *mac_base;         /* mac (virt) base address */

	struct resource        *clk_res;
	void                   *clk_base;         /* clk (virt) base address */

	struct resource        *fuse_res;
	void                   *fuse_base;        /* mac addr fuses */

	unsigned int            n_tbd;            /* # of TX buf-desc */
	struct ring             ring_tbd;         /* TX ring */
	struct buf_desc        *p_tbd;
	unsigned int            tx_ic_mod;        /* (%) val for setting IC */

	unsigned int            n_rbd;            /* # of RX buf-desc */
	struct ring             ring_rbd;         /* RX ring */
	struct buf_desc        *p_rbd;

	struct buf_desc        *p_latest_rbd;
	struct buf_desc        *p_ending_rbd;

	unsigned long           cntr[cntr_last];  /* activity counters */

	struct mii_if_info      mii;              /* used by mii lib */

	int                     mdio_clk;         /* phy mdio clock rate */
	int                     phy_id;           /* default PHY addr (0) */
	struct timer_list       phy_tmr;          /* monitor PHY state */
};
+
/*
 * cntrs display: sysfs show function printing the activity counters
 * in two columns, counters [0, h) beside counters [h, cntr_last)
 */

static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct qfec_priv        *priv = netdev_priv(to_net_dev(dev));
	/* rounds up; NOTE(review): with an odd cntr_last, n+h reaches
	 * cntr_last and reads one past cntr[] -- currently even, verify
	 * when adding counters
	 */
	int                      h    = (cntr_last + 1) / 2;
	int                      l;
	int                      n;
	int                      count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	l = snprintf(&buf[0], count, "%s:\n", __func__);
	for (n = 0; n < h; n++)  {
		l += snprintf(&buf[l], count - l,
			"      %12lu  %-16s %12lu  %s\n",
			priv->cntr[n],   cntr_name[n],
			priv->cntr[n+h], cntr_name[n+h]);
	}

	return l;
}

/* bump one activity counter on a qfec_priv */
# define CNTR_INC(priv, name)  (priv->cntr[name]++)
+
+/*
+ * functions that manage state
+ */
+static inline void qfec_queue_start(struct net_device *dev)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	if (netif_queue_stopped(dev)) {
+		netif_wake_queue(dev);
+		CNTR_INC(priv, queue_start);
+	}
+};
+
+static inline void qfec_queue_stop(struct net_device *dev)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	CNTR_INC(priv, queue_stop);
+};
+
+/*
+ * functions to access and initialize the MAC registers
+ */
+static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg)
+{
+	return ioread32((void *) (priv->mac_base + reg));
+}
+
+static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val)
+{
+	uint32_t    addr = (uint32_t)priv->mac_base + reg;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
+	iowrite32(val, (void *)addr);
+}
+
/*
 * speed/duplex/pause  settings
 *
 * sysfs show function summarizing MAC_CONFIG_REG (speed, duplex,
 * checksum offload) and FLOW_CONTROL_REG (pause direction), plus the
 * driver version string
 */
static int qfec_config_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct qfec_priv        *priv = netdev_priv(to_net_dev(dev));
	int                      cfg  = qfec_reg_read(priv, MAC_CONFIG_REG);
	int                      flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
	int                      l    = 0;
	int                      count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	l += snprintf(&buf[l], count, "%s:", __func__);

	/* PS=0 means GMII/1000; PS=1 selects MII with FES picking 100 vs 10 */
	l += snprintf(&buf[l], count - l, "  [0x%08x] %4dM %s %s", cfg,
		(cfg & MAC_CONFIG_REG_PS)
			? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000,
		cfg & MAC_CONFIG_REG_DM ? "FD" : "HD",
		cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC");

	flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
	l += snprintf(&buf[l], count - l, "  [0x%08x] %s", flow,
		(flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? "PAUSE"
			: ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE"
			: ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : "")));

	l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER);
	l += snprintf(&buf[l], count - l, "\n");
	return l;
}
+
+
/*
 * table and functions to initialize controller registers
 */

struct reg_entry {
	unsigned int  rdonly;	/* non-zero: skipped by qfec_reg_init() */
	unsigned int  addr;	/* register offset from mac_base */
	char         *label;	/* name shown by qfec_reg_show() */
	unsigned int  val;	/* default written when !rdonly */
};

/* defaults written by qfec_reg_init(); rdonly entries are listed only so
 * qfec_reg_show() can dump them
 */
static struct reg_entry  qfec_reg_tbl[] = {
	{ 0, BUS_MODE_REG,           "BUS_MODE_REG",     BUS_MODE_REG_DEFAULT },
	{ 0, AXI_BUS_MODE_REG,       "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT },
	{ 0, AXI_STATUS_REG,         "AXI_STATUS_REG",     0 },

	/* placeholder MAC address -- presumably overwritten at runtime via
	 * qfec_set_adr_regs(); verify in probe path
	 */
	{ 0, MAC_ADR_0_HIGH_REG,     "MAC_ADR_0_HIGH_REG", 0x00000302 },
	{ 0, MAC_ADR_0_LOW_REG,      "MAC_ADR_0_LOW_REG",  0x01350702 },

	{ 1, RX_DES_LST_ADR_REG,     "RX_DES_LST_ADR_REG", 0 },
	{ 1, TX_DES_LST_ADR_REG,     "TX_DES_LST_ADR_REG", 0 },
	{ 1, STATUS_REG,             "STATUS_REG",         0 },
	{ 1, DEBUG_REG,              "DEBUG_REG",          0 },

	{ 0, INTRP_EN_REG,           "INTRP_EN_REG",       QFEC_INTRP_SETUP},

	{ 1, CUR_HOST_TX_DES_REG,    "CUR_HOST_TX_DES_REG",    0 },
	{ 1, CUR_HOST_RX_DES_REG,    "CUR_HOST_RX_DES_REG",    0 },
	{ 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 },
	{ 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 },

	{ 1, MAC_FR_FILTER_REG,      "MAC_FR_FILTER_REG",      0 },

	{ 0, MAC_CONFIG_REG,         "MAC_CONFIG_REG",    MAC_CONFIG_REG_SPD_1G
							| MAC_CONFIG_REG_DM
							| MAC_CONFIG_REG_TE
							| MAC_CONFIG_REG_RE
							| MAC_CONFIG_REG_IPC },

	{ 1, INTRP_STATUS_REG,       "INTRP_STATUS_REG",   0 },
	{ 1, INTRP_MASK_REG,         "INTRP_MASK_REG",     0 },

	{ 0, OPER_MODE_REG,          "OPER_MODE_REG",  OPER_MODE_REG_DEFAULT },

	{ 1, GMII_ADR_REG,           "GMII_ADR_REG",           0 },
	{ 1, GMII_DATA_REG,          "GMII_DATA_REG",          0 },

	/* mask (disable) all MMC counter interrupts */
	{ 0, MMC_INTR_MASK_RX_REG,   "MMC_INTR_MASK_RX_REG",   0xFFFFFFFF },
	{ 0, MMC_INTR_MASK_TX_REG,   "MMC_INTR_MASK_TX_REG",   0xFFFFFFFF },

	{ 1, TS_HIGH_REG,            "TS_HIGH_REG",            0 },
	{ 1, TS_LOW_REG,             "TS_LOW_REG",             0 },

	{ 1, TS_HI_UPDT_REG,         "TS_HI_UPDATE_REG",       0 },
	{ 1, TS_LO_UPDT_REG,         "TS_LO_UPDATE_REG",       0 },
	{ 0, TS_SUB_SEC_INCR_REG,    "TS_SUB_SEC_INCR_REG",    86 },

	{ 0, TS_CTL_REG,             "TS_CTL_REG",        TS_CTL_TSENALL
							| TS_CTL_TSCTRLSSR
							| TS_CTL_TSINIT
							| TS_CTL_TSENA },
};
+
+static void qfec_reg_init(struct qfec_priv *priv)
+{
+	struct reg_entry *p = qfec_reg_tbl;
+	int         n = ARRAY_SIZE(qfec_reg_tbl);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	for  (; n--; p++) {
+		if (!p->rdonly)
+			qfec_reg_write(priv, p->addr, p->val);
+	}
+}
+
+/*
+ * display registers thru sysfs
+ */
+static int qfec_reg_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct qfec_priv   *priv = netdev_priv(to_net_dev(dev));
+	struct reg_entry   *p = qfec_reg_tbl;
+	int                 n = ARRAY_SIZE(qfec_reg_tbl);
+	int                 l = 0;
+	int                 count = PAGE_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	for (; n--; p++) {
+		l += snprintf(&buf[l], count - l, "    %8p   %04x %08x  %s\n",
+			(void *)priv->mac_base + p->addr, p->addr,
+			qfec_reg_read(priv, p->addr), p->label);
+	}
+
+	return  l;
+}
+
+/*
+ * set the MAC-0 address
+ */
+static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr)
+{
+	uint32_t        h = 0;
+	uint32_t        l = 0;
+
+	h = h << 8 | addr[5];
+	h = h << 8 | addr[4];
+
+	l = l << 8 | addr[3];
+	l = l << 8 | addr[2];
+	l = l << 8 | addr[1];
+	l = l << 8 | addr[0];
+
+	qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h);
+	qfec_reg_write(priv, MAC_ADR_0_LOW_REG,  l);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l);
+}
+
/*
 * reset the controller
 *
 * issues a software reset via BUS_MODE_REG and polls until the HW clears
 * the SWR bit.  Returns 0 on success, -ETIME on timeout.
 */

#define QFEC_RESET_TIMEOUT   10000
	/* reset should always clear but did not w/o test/delay
	 * in RgMii mode.  there is no spec'd max timeout
	 */

static int qfec_hw_reset(struct qfec_priv *priv)
{
	int             timeout = QFEC_RESET_TIMEOUT;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR);

	while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) {
		if (timeout-- == 0) {
			QFEC_LOG_ERR("%s: timeout\n", __func__);
			return -ETIME;
		}

		/* there were problems resetting the controller
		 * in RGMII mode when there wasn't sufficient
		 * delay between register reads
		 */
		usleep_range(100, 200);
	}

	return 0;
}
+
/*
 * initialize controller
 *
 * soft-resets the MAC, programs register defaults, points the DMA engine
 * at the TX/RX descriptor lists, and clears pending interrupt status.
 * Returns 0 or the error from qfec_hw_reset().
 */
static int qfec_hw_init(struct qfec_priv *priv)
{
	int  res = 0;

	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);

	res = qfec_hw_reset(priv);
	if (res)
		return res;

	qfec_reg_init(priv);

	/* config buf-desc locations */
	qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma);
	qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma);

	/* clear interrupts -- NOTE(review): writing INTRP_EN_* bits to
	 * STATUS_REG looks like write-1-to-clear semantics; verify against
	 * the GMAC register spec
	 */
	qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | INTRP_EN_REG_RIE
		| INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE);

	return res;
}
+
+/*
+ * en/disable controller
+ */
+static void qfec_hw_enable(struct qfec_priv *priv)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	qfec_reg_write(priv, OPER_MODE_REG,
+	qfec_reg_read(priv, OPER_MODE_REG)
+		| OPER_MODE_REG_ST | OPER_MODE_REG_SR);
+}
+
+static void qfec_hw_disable(struct qfec_priv *priv)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	qfec_reg_write(priv, OPER_MODE_REG,
+	qfec_reg_read(priv, OPER_MODE_REG)
+		& ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR));
+}
+
/*
 * interface selection
 *
 * per-interface clock/mux register values; table is indexed by
 * enum phy_intfc (mii/rgmii/revmii) in qfec_intf_sel()
 */
struct intf_config  {
	uint32_t     intf_sel;		/* EMAC_PHY_INTF_SEL_REG value */
	uint32_t     emac_ns;		/* EMAC_NS_REG value */
	uint32_t     eth_x_en_ns;	/* ETH_X_EN_NS_REG value */
	uint32_t     clkmux_sel;	/* EMAC_CLKMUX_SEL_REG value */
};

/* reverse-MII additionally inverts the TX clock and selects both muxes */
#define ETH_X_EN_NS_REVMII      (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV)
#define CLKMUX_REVMII           (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1)

static struct intf_config intf_config_tbl[] = {
	{ EMAC_PHY_INTF_SEL_MII,    EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_RGMII,  EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 },
	{ EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII,
								CLKMUX_REVMII }
};
+
+/*
+ * emac clk register read and write functions
+ */
+static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg)
+{
+	return ioread32((void *) (priv->clk_base + reg));
+}
+
+static inline void qfec_clkreg_write(struct qfec_priv *priv,
+	uint32_t reg, uint32_t val)
+{
+	uint32_t   addr = (uint32_t)priv->clk_base + reg;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val);
+	iowrite32(val, (void *)addr);
+}
+
/*
 * configure the PHY interface and clock routing and signal bits
 */

/* values double as indices into intf_config_tbl */
enum phy_intfc  {
	intfc_mii     = 0,
	intfc_rgmii   = 1,
	intfc_revmii  = 2,
};

/* program the four clock/mux registers for the requested PHY interface;
 * returns 0, or -ENXIO for an out-of-range selector
 */
static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc)
{
	struct intf_config   *p;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc);

	if (intfc > intfc_revmii)  {
		QFEC_LOG_ERR("%s: range\n", __func__);
		return -ENXIO;
	}

	p = &intf_config_tbl[intfc];

	qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel);
	qfec_clkreg_write(priv, EMAC_NS_REG,           p->emac_ns);
	qfec_clkreg_write(priv, ETH_X_EN_NS_REG,       p->eth_x_en_ns);
	qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG,   p->clkmux_sel);

	return 0;
}
+
/*
 * display registers thru proc-fs
 */

/* clock-controller registers dumped by qfec_clk_reg_show() */
static struct qfec_clk_reg {
	uint32_t        offset;		/* offset from clk_base */
	char           *label;
} qfec_clk_regs[] = {
	{ ETH_MD_REG,                  "ETH_MD_REG"  },
	{ ETH_NS_REG,                  "ETH_NS_REG"  },
	{ ETH_X_EN_NS_REG,             "ETH_X_EN_NS_REG"  },
	{ EMAC_PTP_MD_REG,             "EMAC_PTP_MD_REG"  },
	{ EMAC_PTP_NS_REG,             "EMAC_PTP_NS_REG"  },
	{ EMAC_NS_REG,                 "EMAC_NS_REG"  },
	{ EMAC_TX_FS_REG,              "EMAC_TX_FS_REG"  },
	{ EMAC_RX_FS_REG,              "EMAC_RX_FS_REG"  },
	{ EMAC_PHY_INTF_SEL_REG,       "EMAC_PHY_INTF_SEL_REG"  },
	{ EMAC_PHY_ADDR_REG,           "EMAC_PHY_ADDR_REG"  },
	{ EMAC_REVMII_PHY_ADDR_REG,    "EMAC_REVMII_PHY_ADDR_REG"  },
	{ EMAC_CLKMUX_SEL_REG,         "EMAC_CLKMUX_SEL_REG"  },
};

/* sysfs show function: address, offset, value, and label of each clock
 * register in qfec_clk_regs[]
 */
static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct qfec_priv        *priv = netdev_priv(to_net_dev(dev));
	struct qfec_clk_reg     *p = qfec_clk_regs;
	int                      n = ARRAY_SIZE(qfec_clk_regs);
	int                      l = 0;
	int                      count = PAGE_SIZE;

	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);

	for (; n--; p++) {
		l += snprintf(&buf[l], count - l, "    %8p  %8x  %08x  %s\n",
			(void *)priv->clk_base + p->offset, p->offset,
			qfec_clkreg_read(priv, p->offset), p->label);
	}

	return  l;
}
+
/*
 * speed selection
 *
 * M/N:D divider settings producing the 2.5/25/125 MHz TX clocks for
 * 10/100/1000 Mbps operation; table is indexed by enum speed below
 */

struct qfec_pll_cfg {
	uint32_t    spd;        /* MAC_CONFIG_REG speed bits */
	uint32_t    eth_md;     /* M [31:16], NOT 2*D [15:0] */
	uint32_t    eth_ns;     /* NOT(M-N) [31:16], ctl bits [11:0]  */
};

static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = {
	/* 2.5 MHz */
	{ MAC_CONFIG_REG_SPD_10,   ETH_MD_M(1)  | ETH_MD_2D_N(100),
						  ETH_NS_NM(100-1)
						| ETH_NS_MCNTR_EN
						| ETH_NS_MCNTR_MODE_DUAL
						| ETH_NS_PRE_DIV(0)
						| CLK_SRC_PLL_EMAC },
	/* 25 MHz */
	{ MAC_CONFIG_REG_SPD_100,  ETH_MD_M(1)  | ETH_MD_2D_N(10),
						  ETH_NS_NM(10-1)
						| ETH_NS_MCNTR_EN
						| ETH_NS_MCNTR_MODE_DUAL
						| ETH_NS_PRE_DIV(0)
						| CLK_SRC_PLL_EMAC },
	/* 125 MHz */
	{MAC_CONFIG_REG_SPD_1G,    0,             ETH_NS_PRE_DIV(1)
						| CLK_SRC_PLL_EMAC },
};

/* values double as indices into qfec_pll_cfg_tbl */
enum speed  {
	spd_10   = 0,
	spd_100  = 1,
	spd_1000 = 2,
};
+
/*
 * configure the PHY interface and clock routing and signal bits
 *
 * sets the MAC speed/duplex bits and the matching TX clock dividers.
 * spd is an enum speed value, dplx non-zero for full duplex.
 * Returns 0, or -ENODEV for an out-of-range speed.
 */
static int qfec_speed_cfg(struct net_device *dev, unsigned int spd,
	unsigned int dplx)
{
	struct qfec_priv       *priv = netdev_priv(dev);
	struct qfec_pll_cfg    *p;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx);

	if (spd > spd_1000)  {
		QFEC_LOG_ERR("%s: range\n", __func__);
		return -ENODEV;
	}

	p = &qfec_pll_cfg_tbl[spd];

	/* set the MAC speed bits */
	qfec_reg_write(priv, MAC_CONFIG_REG,
	(qfec_reg_read(priv, MAC_CONFIG_REG)
		& ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM))
			| p->spd | (dplx ? MAC_CONFIG_REG_DM : 0));

	qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns);

	return 0;
}
+
/*
 * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz
 */

static struct qfec_pll_cfg qfec_pll_ptp = {
	/* 25 MHz: /10 from the 250 MHz EMAC PLL */
	0,      ETH_MD_M(1) | ETH_MD_2D_N(10),    ETH_NS_NM(10-1)
						| EMAC_PTP_NS_ROOT_EN
						| EMAC_PTP_NS_CLK_EN
						| ETH_NS_MCNTR_EN
						| ETH_NS_MCNTR_MODE_DUAL
						| ETH_NS_PRE_DIV(0)
						| CLK_SRC_PLL_EMAC
};

/* clock-test-port registers used to verify the PTP clock externally */
#define PLLTEST_PAD_CFG     0x01E0
#define PLLTEST_PLL_7       0x3700

#define CLKTEST_REG         0x01EC
#define CLKTEST_EMAC_RX     0x3fc07f7a

/* program the PTP clock dividers and route the clock to the test ports;
 * always returns 0
 */
static int qfec_ptp_cfg(struct qfec_priv *priv)
{
	struct qfec_pll_cfg    *p    = &qfec_pll_ptp;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n",
		__func__, p->eth_md, p->eth_ns);

	qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md);
	qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns);

	/* configure HS/LS clk test ports to verify clks */
	qfec_clkreg_write(priv, CLKTEST_REG,     CLKTEST_EMAC_RX);
	qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7);

	return 0;
}
+
+/*
+ * MDIO operations
+ */
+
+/*
+ * wait reasonable amount of time for MDIO operation to complete, not busy
+ */
+static int qfec_mdio_busy(struct net_device *dev)
+{
+	int     i;
+
+	for (i = 100; i > 0; i--)  {
+		if (!(qfec_reg_read(
+			netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB))  {
+			return 0;
+		}
+		udelay(1);
+	}
+
+	return -ETIME;
+}
+
/*
 * initiate either a read or write MDIO operation
 *
 * waits for the bus to go idle, then writes the GMII address register
 * with the phy/register selectors, clock-divider, direction (wr) and
 * go-busy bits, and waits for completion.
 * Returns 0 or -ETIME.  Caller must hold mdio_lock.
 */

static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr)
{
	struct qfec_priv   *priv = netdev_priv(dev);
	int                 res = 0;

	/* insure phy not busy */
	res = qfec_mdio_busy(dev);
	if (res)  {
		QFEC_LOG_ERR("%s: busy\n", __func__);
		goto done;
	}

	/* initiate operation */
	qfec_reg_write(priv, GMII_ADR_REG,
		GMII_ADR_REG_ADR_SET(phy_id)
		| GMII_ADR_REG_REG_SET(reg)
		| GMII_ADR_REG_CSR_SET(priv->mdio_clk)
		| (wr ? GMII_ADR_REG_GW : 0)
		| GMII_ADR_REG_GB);

	/* wait for operation to complete */
	res = qfec_mdio_busy(dev);
	if (res)
		QFEC_LOG_ERR("%s: timeout\n", __func__);

done:
	return res;
}
+
/*
 * read MDIO register
 *
 * returns the 16-bit register value on success, or the negative error
 * from qfec_mdio_oper() on failure.  NOTE(review): callers cannot
 * distinguish an error code from a legitimate register value -- this
 * matches the mii-lib callback signature, but verify callers tolerate it.
 */
static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct qfec_priv   *priv = netdev_priv(dev);
	int                 res = 0;
	unsigned long       flags;

	/* serialize MDIO transactions */
	spin_lock_irqsave(&priv->mdio_lock, flags);

	res = qfec_mdio_oper(dev, phy_id, reg, 0);
	if (res)  {
		QFEC_LOG_ERR("%s: oper\n", __func__);
		goto done;
	}

	res = qfec_reg_read(priv, GMII_DATA_REG);
	QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n",
		__func__, reg, res);

done:
	spin_unlock_irqrestore(&priv->mdio_lock, flags);
	return res;
}
+
/*
 * write MDIO register
 *
 * loads the data register, then triggers the write via qfec_mdio_oper();
 * failures are logged but not reported (mii-lib write callback is void)
 */
static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg,
	int val)
{
	struct qfec_priv   *priv = netdev_priv(dev);
	unsigned long       flags;

	/* serialize MDIO transactions */
	spin_lock_irqsave(&priv->mdio_lock, flags);

	QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n",
		__func__, reg, val);

	qfec_reg_write(priv, GMII_DATA_REG, val);

	if (qfec_mdio_oper(dev, phy_id, reg, 1))
		QFEC_LOG_ERR("%s: oper\n", __func__);

	spin_unlock_irqrestore(&priv->mdio_lock, flags);
}
+
+/*
+ * get auto-negotiation results
+ */
+
+#define QFEC_100        (LPA_100HALF | LPA_100FULL | LPA_100HALF)
+#define QFEC_100_FD     (LPA_100FULL | LPA_100BASE4)
+#define QFEC_10         (LPA_10HALF  | LPA_10FULL)
+#define QFEC_10_FD       LPA_10FULL
+
+static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	uint32_t            status;
+	uint32_t            advert;
+	uint32_t            lpa;
+	uint32_t            flow;
+
+	advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+	lpa    = qfec_mdio_read(dev, priv->phy_id, MII_LPA);
+	status = advert & lpa;
+
+	/* todo: check extended status register for 1G abilities */
+
+	if (status & QFEC_100)  {
+		*spd  = spd_100;
+		*dplx = status & QFEC_100_FD ? 1 : 0;
+	}
+
+	else if (status & QFEC_10)  {
+		*spd  = spd_10;
+		*dplx = status & QFEC_10_FD ? 1 : 0;
+	}
+
+	/* check pause */
+	flow  = qfec_reg_read(priv, FLOW_CONTROL_REG);
+	flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE);
+
+	if (status & ADVERTISE_PAUSE_CAP)  {
+		flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE;
+	} else if (status & ADVERTISE_PAUSE_ASYM)  {
+		if (lpa & ADVERTISE_PAUSE_CAP)
+			flow |= FLOW_CONTROL_TFE;
+		else if (advert & ADVERTISE_PAUSE_CAP)
+			flow |= FLOW_CONTROL_RFE;
+	}
+
+	qfec_reg_write(priv, FLOW_CONTROL_REG, flow);
+}
+
/*
 * monitor phy status, and process auto-neg results when changed
 *
 * 1 Hz self-rearming timer callback: on link-up it applies the
 * negotiated speed/duplex before marking the carrier on; on link-down
 * it marks the carrier off
 */

static void qfec_phy_monitor(unsigned long data)
{
	struct net_device  *dev  = (struct net_device *) data;
	struct qfec_priv   *priv = netdev_priv(dev);
	/* defaults if auto-neg yields no common ability: 10M full */
	unsigned int        spd  = 0;
	unsigned int        dplx = 1;

	mod_timer(&priv->phy_tmr, jiffies + HZ);

	if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev))  {
		qfec_get_an(dev, &spd, &dplx);
		qfec_speed_cfg(dev, spd, dplx);
		QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n",
			__func__, spd, dplx);

		netif_carrier_on(dev);
	}

	else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev))  {
		QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__);
		netif_carrier_off(dev);
	}
}
+
/*
 * dealloc buffer descriptor memory
 *
 * releases the coherent TX+RX descriptor region allocated by
 * qfec_mem_alloc().  NOTE(review): the p_tbd/p_rbd arrays kcalloc'd in
 * qfec_mem_alloc() are not freed here -- confirm they are freed elsewhere
 */

static void qfec_mem_dealloc(struct net_device *dev)
{
	struct qfec_priv   *priv = netdev_priv(dev);

	dma_free_coherent(&dev->dev,
		priv->bd_size, priv->bd_base, priv->tbd_dma);
	priv->bd_base = 0;
}
+
+/*
+ * allocate shared device memory for TX/RX buf-desc (and buffers)
+ */
+
+static int qfec_mem_alloc(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+	priv->bd_size =
+		(priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc);
+
+	priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL);
+	if (!priv->p_tbd)  {
+		QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__);
+		return -ENOMEM;
+	}
+
+	priv->p_rbd = kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
+	if (!priv->p_rbd)  {
+		QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* alloc mem for buf-desc, if not already alloc'd */
+	if (!priv->bd_base)  {
+		priv->bd_base = dma_alloc_coherent(&dev->dev,
+			priv->bd_size, &priv->tbd_dma,
+			GFP_KERNEL | __GFP_DMA);
+	}
+
+	if (!priv->bd_base)  {
+		QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	priv->rbd_dma   = priv->tbd_dma
+			+ (priv->n_tbd * sizeof(struct qfec_buf_desc));
+
+	QFEC_LOG(QFEC_LOG_DBG,
+		" %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
+		__func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
+
+	return 0;
+}
+
/*
 * display buffer descriptors
 */

/* format one BD (HW words plus software shadow fields) into buf;
 * returns the number of characters written
 */
static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
{
	return snprintf(buf, size,
		"%8p: %08x %08x %8p %8p  %8p %8p %8p %x",
		p_bd,                     qfec_bd_status_get(p_bd),
		qfec_bd_ctl_get(p_bd),    qfec_bd_pbuf_get(p_bd),
		qfec_bd_next_get(p_bd),   qfec_bd_skbuf_get(p_bd),
		qfec_bd_virt_get(p_bd),   qfec_bd_phys_get(p_bd),
		qfec_bd_last_bd(p_bd));
}

/* dump up to MAX_N_BD descriptors of one ring, annotating the current
 * head ("< h") and tail ("< t") positions; returns chars written
 */
static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
	struct ring *p_ring, char *label)
{
	int     l = 0;
	int     n;

	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);

	l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
	if (!p_bd)
		return l;

	n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;

	for (n = 0; n < n_bd; n++, p_bd++) {
		l += qfec_bd_fmt(&buf[l], count - l, p_bd);
		l += snprintf(&buf[l], count - l, "%s%s\n",
			(qfec_ring_head(p_ring) == n ? " < h" : ""),
			(qfec_ring_tail(p_ring) == n ? " < t" : ""));
	}

	return l;
}
+
+/*
+ * display TX BDs
+ */
+static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	/* sysfs read: dump the TX descriptor ring (at most one page) */
+	struct qfec_priv *p = netdev_priv(to_net_dev(dev));
+
+	return qfec_bd_show(buf, PAGE_SIZE, p->p_tbd, p->n_tbd,
+				&p->ring_tbd, "TX");
+}
+
+/*
+ * display RX BDs
+ */
+static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	/* sysfs read: dump the RX descriptor ring (at most one page) */
+	struct qfec_priv *p = netdev_priv(to_net_dev(dev));
+
+	return qfec_bd_show(buf, PAGE_SIZE, p->p_rbd, p->n_rbd,
+				&p->ring_rbd, "RX");
+}
+
+/*
+ * read timestamp from buffer descriptor
+ *    the pbuf and next fields of the buffer descriptors are overwritten
+ *    with the timestamp high and low register values.   The high register
+ *    counts seconds, but the sub-second increment register is programmed
+ *    with the appropriate value to increment the timestamp low register
+ *    such that it overflows at 0x8000 0000.  The low register value
+ *    (next) must be converted to units of nano secs, * 10^9 / 2^31.
+ */
+static void qfec_read_timestamp(struct buf_desc *p_bd,
+	struct skb_shared_hwtstamps *ts)
+{
+	/* NOTE(review): the block comment above says the low (sub-second)
+	 * register lands in 'next', but the code takes seconds from 'next'
+	 * and the sub-second count from 'pbuf' - confirm against the MAC
+	 * timestamp register layout.
+	 */
+	unsigned long  sec = (unsigned long)qfec_bd_next_get(p_bd);
+	long long      ns  = (unsigned long)qfec_bd_pbuf_get(p_bd);
+
+#define BILLION		1000000000
+#define LOW_REG_BITS    31
+	/* convert the 31-bit fractional count to ns: ns * 10^9 / 2^31 */
+	ns  *= BILLION;
+	ns >>= LOW_REG_BITS;
+
+	ts->hwtstamp  = ktime_set(sec, ns);
+	ts->syststamp = ktime_set(sec, ns);
+}
+
+/*
+ * free transmitted skbufs from buffer-descriptor no owned by HW
+ */
+static int qfec_tx_replenish(struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct ring        *p_ring = &priv->ring_tbd;
+	struct buf_desc    *p_bd   = &priv->p_tbd[qfec_ring_tail(p_ring)];
+	struct sk_buff     *skb;
+	unsigned long      flags;
+
+	CNTR_INC(priv, tx_replenish);
+
+	/* xmit_lock serializes against qfec_xmit() adding descriptors */
+	spin_lock_irqsave(&priv->xmit_lock, flags);
+
+	/* walk from the ring tail, reclaiming every descriptor the DMA
+	 * engine has handed back (ownership bit cleared)
+	 */
+	while (!qfec_ring_empty(p_ring))  {
+		if (qfec_bd_own(p_bd))
+			break;          /* done for now */
+
+		skb = qfec_bd_skbuf_get(p_bd);
+		if (unlikely(skb == NULL))  {
+			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
+			CNTR_INC(priv, tx_skb_null);
+			break;
+		}
+
+		/* ack TX buffer-unavailable / TX-complete status bits */
+		qfec_reg_write(priv, STATUS_REG,
+			STATUS_REG_TU | STATUS_REG_TI);
+
+		/* retrieve timestamp if requested */
+		if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS)  {
+			CNTR_INC(priv, ts_tx_rtn);
+			qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
+			skb_tstamp_tx(skb, skb_hwtstamps(skb));
+		}
+
+		/* update statistics before freeing skb */
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes  += skb->len;
+
+		dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd),
+				skb->len, DMA_TO_DEVICE);
+
+		dev_kfree_skb_any(skb);
+		qfec_bd_skbuf_set(p_bd, NULL);
+
+		qfec_ring_tail_adv(p_ring);
+		p_bd   = &priv->p_tbd[qfec_ring_tail(p_ring)];
+	}
+
+	spin_unlock_irqrestore(&priv->xmit_lock, flags);
+
+	/* descriptors were freed above; let the stack queue more frames */
+	qfec_queue_start(dev);
+
+	return 0;
+}
+
+/*
+ * clear ownership bits of all TX buf-desc and release the sk-bufs
+ */
+static void qfec_tx_timeout(struct net_device *dev)
+{
+	/* watchdog: reclaim every TX descriptor from the DMA engine by
+	 * clearing the ownership bits, then free the stuck sk_buffs
+	 * through the normal replenish path
+	 */
+	struct qfec_priv *priv = netdev_priv(dev);
+	int               idx;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+	CNTR_INC(priv, tx_timeout);
+
+	for (idx = 0; idx < priv->n_tbd; idx++)
+		qfec_bd_own_clr(&priv->p_tbd[idx]);
+
+	qfec_tx_replenish(dev);
+}
+
+/*
+ * rx() - process a received frame
+ */
+static void qfec_rx_int(struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct ring        *p_ring = &priv->ring_rbd;
+	struct buf_desc    *p_bd   = priv->p_latest_rbd;
+	uint32_t desc_status;
+	uint32_t mis_fr_reg;
+
+	desc_status = qfec_bd_status_get(p_bd);
+	mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG);
+
+	CNTR_INC(priv, rx_int);
+
+	/* check that valid interrupt occurred: the descriptor at the
+	 * last-processed position must have been released by the DMA
+	 */
+	if (unlikely(desc_status & BUF_OWN)) {
+		char  s[100];
+
+		qfec_bd_fmt(s, sizeof(s), p_bd);
+		QFEC_LOG_ERR("%s: owned by DMA, %08x, %s\n", __func__,
+			qfec_reg_read(priv, CUR_HOST_RX_DES_REG), s);
+		CNTR_INC(priv, rx_owned);
+		return;
+	}
+
+	/* accumulate missed-frame count (reg reset when read) */
+	priv->stats.rx_missed_errors += mis_fr_reg
+					& MIS_FR_REG_MISS_CNT;
+
+	/* process all unowned frames, advancing circularly through the
+	 * descriptor array (wrap from the ending descriptor to the base)
+	 */
+	while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring)))  {
+		struct sk_buff     *skb;
+		struct buf_desc    *p_bd_next;
+
+		skb = qfec_bd_skbuf_get(p_bd);
+
+		if (unlikely(skb == NULL))  {
+			QFEC_LOG_ERR("%s: null sk_buff\n", __func__);
+			CNTR_INC(priv, rx_skb_null);
+			break;
+		}
+
+		/* cache coherency before skb->data is accessed */
+		dma_unmap_single(&dev->dev,
+			(dma_addr_t) qfec_bd_phys_get(p_bd),
+			ETH_BUF_SIZE, DMA_FROM_DEVICE);
+		prefetch(skb->data);
+
+		/* error summary set: drop the frame */
+		if (unlikely(desc_status & BUF_RX_ES)) {
+			priv->stats.rx_dropped++;
+			CNTR_INC(priv, rx_dropped);
+			dev_kfree_skb(skb);
+		} else  {
+			/* ack the receive interrupt */
+			qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI);
+
+			/* frame length comes from the status word */
+			skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status);
+
+			if (priv->state & timestamping)  {
+				CNTR_INC(priv, ts_rec);
+				qfec_read_timestamp(p_bd, skb_hwtstamps(skb));
+			}
+
+			/* update statistics before freeing skb */
+			priv->stats.rx_packets++;
+			priv->stats.rx_bytes  += skb->len;
+
+			skb->dev        = dev;
+			skb->protocol   = eth_type_trans(skb, dev);
+			skb->ip_summed  = CHECKSUM_UNNECESSARY;
+
+			if (NET_RX_DROP == netif_rx(skb))  {
+				priv->stats.rx_dropped++;
+				CNTR_INC(priv, rx_dropped);
+			}
+			CNTR_INC(priv, netif_rx_cntr);
+		}
+
+		/* read the next descriptor's status before releasing this
+		 * one, so the loop condition sees a consistent snapshot
+		 */
+		if (p_bd != priv->p_ending_rbd)
+			p_bd_next = p_bd + 1;
+		else
+			p_bd_next = priv->p_rbd;
+		desc_status = qfec_bd_status_get(p_bd_next);
+
+		qfec_bd_skbuf_set(p_bd, NULL);
+
+		qfec_ring_head_adv(p_ring);
+		p_bd = p_bd_next;
+	}
+
+	priv->p_latest_rbd = p_bd;
+
+	/* replenish bufs: give fresh sk_buffs back to the DMA engine */
+	while (!qfec_ring_empty(p_ring))  {
+		if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)]))
+			break;
+		qfec_ring_tail_adv(p_ring);
+	}
+}
+
+/*
+ * isr() - interrupt service routine
+ *          determine cause of interrupt and invoke/schedule appropriate
+ *          processing or error handling
+ */
+#define ISR_ERR_CHK(priv, status, interrupt, cntr) \
+	if (status & interrupt) \
+		CNTR_INC(priv, cntr)
+
+static irqreturn_t qfec_int(int irq, void *dev_id)
+{
+	struct net_device  *dev      = dev_id;
+	struct qfec_priv   *priv     = netdev_priv(dev);
+	uint32_t            status   = qfec_reg_read(priv, STATUS_REG);
+	uint32_t            int_bits = STATUS_REG_NIS | STATUS_REG_AIS;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name);
+
+	/* abnormal interrupt */
+	if (status & STATUS_REG_AIS)  {
+		QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n",
+			__func__, status);
+
+		ISR_ERR_CHK(priv, status, STATUS_REG_RU,  rx_buf_unavail);
+		ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus);
+
+		ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog);
+		ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped);
+		ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow);
+
+		ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow);
+		ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout);
+		ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped);
+
+		int_bits |= STATUS_REG_AIS_BITS;
+		CNTR_INC(priv, abnorm_int);
+	}
+
+	if (status & STATUS_REG_NIS)
+		CNTR_INC(priv, norm_int);
+
+	/* receive interrupt */
+	if (status & STATUS_REG_RI)  {
+		CNTR_INC(priv, rx_isr);
+		qfec_rx_int(dev);
+	}
+
+	/* transmit interrupt */
+	if (status & STATUS_REG_TI)  {
+		CNTR_INC(priv, tx_isr);
+		qfec_tx_replenish(dev);
+	}
+
+	/* gmac interrupt */
+	if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI))  {
+		CNTR_INC(priv, gmac_isr);
+		int_bits |= STATUS_REG_GMI;
+	}
+
+	/* clear interrupts */
+	qfec_reg_write(priv, STATUS_REG, int_bits);
+	CNTR_INC(priv, isr);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * open () - register system resources (IRQ, DMA, ...)
+ *   turn on HW, perform device setup.
+ */
+static int qfec_open(struct net_device *dev)
+{
+	struct qfec_priv   *priv;
+	struct buf_desc    *p_bd;
+	struct ring        *p_ring;
+	struct qfec_buf_desc *p_desc;
+	int                 n;
+	int                 res = 0;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev);
+
+	/* validate before use; the old code evaluated netdev_priv(dev)
+	 * before performing this check
+	 */
+	if (!dev)  {
+		res = -EINVAL;
+		goto err;
+	}
+	priv = netdev_priv(dev);
+
+	/* allocate TX/RX buffer-descriptors and buffers */
+
+	res = qfec_mem_alloc(dev);
+	if (res)
+		goto err;
+
+	/* initialize TX descriptors; all start host-owned */
+	p_desc = priv->bd_base;
+
+	for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) {
+		p_bd->p_desc = p_desc++;
+
+		if (n == (priv->n_tbd - 1))
+			qfec_bd_last_bd_set(p_bd);
+
+		qfec_bd_own_clr(p_bd);      /* clear ownership */
+	}
+
+	qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd);
+
+	/* request a TX-complete interrupt roughly every
+	 * (n_tbd / TX_BD_TI_RATIO) frames, at least every frame
+	 */
+	priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO;
+	if (priv->tx_ic_mod == 0)
+		priv->tx_ic_mod = 1;
+
+	/* initialize RX buffer descriptors and allocate sk_bufs */
+	p_ring = &priv->ring_rbd;
+	qfec_ring_init(p_ring, priv->n_rbd, 0);
+	qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]);
+
+	for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) {
+		p_bd->p_desc = p_desc++;
+
+		if (qfec_rbd_init(dev, p_bd))
+			break;
+		qfec_ring_tail_adv(p_ring);
+	}
+
+	priv->p_latest_rbd = priv->p_rbd;
+	priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1;
+
+	/* config ptp clock */
+	qfec_ptp_cfg(priv);
+
+	/* configure PHY - must be set before reset/hw_init */
+	qfec_intf_sel(priv, intfc_mii);
+
+	/* initialize controller after BDs allocated */
+	res = qfec_hw_init(priv);
+	if (res)
+		goto err1;
+
+	/* get/set (primary) MAC address */
+	qfec_set_adr_regs(priv, dev->dev_addr);
+
+	/* register the IRQ before arming the PHY-monitor timer so that a
+	 * request_irq() failure cannot leave a live timer referencing the
+	 * torn-down device (the old order armed the timer first and never
+	 * deleted it on the error path)
+	 */
+	QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq);
+	res = request_irq(dev->irq, qfec_int, 0, dev->name, dev);
+	if (res)
+		goto err1;
+
+	/* start phy monitor */
+	QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__);
+	netif_carrier_off(priv->net_dev);
+	setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev);
+	mod_timer(&priv->phy_tmr, jiffies + HZ);
+
+	/* enable controller */
+	qfec_hw_enable(priv);
+	netif_start_queue(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__,
+		mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev));
+
+	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
+	return 0;
+
+err1:
+	qfec_mem_dealloc(dev);
+err:
+	QFEC_LOG_ERR("%s: error - %d\n", __func__, res);
+	return res;
+}
+
+/*
+ * stop() - "reverse operations performed at open time"
+ */
+static int qfec_stop(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	struct buf_desc    *p_bd;
+	struct sk_buff     *skb;
+	int                 n;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	/* stop the phy monitor, quiesce the MAC/DMA, release the IRQ */
+	del_timer_sync(&priv->phy_tmr);
+
+	qfec_hw_disable(priv);
+	qfec_queue_stop(dev);
+	free_irq(dev->irq, dev);
+
+	/* free all pending sk_bufs */
+	/* NOTE(review): no dma_unmap_single() before freeing the RX
+	 * buffers here - confirm the mappings are torn down elsewhere
+	 */
+	for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) {
+		skb = qfec_bd_skbuf_get(p_bd);
+		if (skb)
+			dev_kfree_skb(skb);
+	}
+
+	for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) {
+		skb = qfec_bd_skbuf_get(p_bd);
+		if (skb)
+			dev_kfree_skb(skb);
+	}
+
+	/* release the descriptor memory allocated by qfec_open() */
+	qfec_mem_dealloc(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__);
+
+	return 0;
+}
+
+/* ndo_set_config: no runtime map reconfiguration supported; accept and
+ * ignore the request
+ */
+static int qfec_set_config(struct net_device *dev, struct ifmap *map)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+	return 0;
+}
+
+/*
+ * pass data from skbuf to buf-desc
+ */
+static int qfec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct qfec_priv   *priv   = netdev_priv(dev);
+	struct ring        *p_ring = &priv->ring_tbd;
+	struct buf_desc    *p_bd;
+	uint32_t            ctrl   = 0;
+	int                 ret    = NETDEV_TX_OK;
+	unsigned long       flags;
+
+	CNTR_INC(priv, xmit);
+
+	/* xmit_lock serializes against qfec_tx_replenish() */
+	spin_lock_irqsave(&priv->xmit_lock, flags);
+
+	/* stop queuing if no resources available */
+	if (qfec_ring_room(p_ring) == 0)  {
+		qfec_queue_stop(dev);
+		CNTR_INC(priv, tx_no_resource);
+
+		ret = NETDEV_TX_BUSY;
+		goto done;
+	}
+
+	/* locate and save *sk_buff */
+	p_bd = &priv->p_tbd[qfec_ring_head(p_ring)];
+	qfec_bd_skbuf_set(p_bd, skb);
+
+	/* set DMA ptr to sk_buff data and write cache to memory */
+	/* NOTE(review): the dma_map_single() result is not checked with
+	 * dma_mapping_error() before being handed to the hardware
+	 */
+	qfec_bd_pbuf_set(p_bd, (void *)
+	dma_map_single(&dev->dev,
+		(void *)skb->data, skb->len, DMA_TO_DEVICE));
+
+	ctrl  = skb->len;
+	if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod))
+		ctrl |= BUF_TX_IC; /* interrupt on complete */
+
+	/* check if timestamping enabled and requested */
+	if (priv->state & timestamping)  {
+		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+			CNTR_INC(priv, ts_tx_en);
+			ctrl |= BUF_TX_IC;	/* interrupt on complete */
+			ctrl |= BUF_TX_TTSE;	/* enable timestamp */
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		}
+	}
+
+	/* NOTE(review): BUF_RX_RER on a TX descriptor - presumably the TX
+	 * end-of-ring bit has the same value; confirm against the HW spec
+	 */
+	if (qfec_bd_last_bd(p_bd))
+		ctrl |= BUF_RX_RER;
+
+	/* no gather, no multi buf frames */
+	ctrl |= BUF_TX_FS | BUF_TX_LS;  /* 1st and last segment */
+
+	/* hand the descriptor to the DMA engine only after ctl is set */
+	qfec_bd_ctl_wr(p_bd, ctrl);
+	qfec_bd_status_set(p_bd, BUF_OWN);
+
+	qfec_ring_head_adv(p_ring);
+	qfec_reg_write(priv, TX_POLL_DEM_REG, 1);      /* poll */
+
+done:
+	spin_unlock_irqrestore(&priv->xmit_lock, flags);
+
+	return ret;
+}
+
+/*
+ * ioctl handler: SIOCSHWTSTAMP enables HW timestamping; everything
+ * else is forwarded to the generic MII handler.
+ *
+ * Fix: the config for SIOCSHWTSTAMP is passed as a user-space pointer
+ * in ifr->ifr_data; the previous code cast the ifreq itself to
+ * struct hwtstamp_config, reading the interface-name bytes as config
+ * and writing results nowhere user space could see them.
+ */
+static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct qfec_priv        *priv = netdev_priv(dev);
+	struct hwtstamp_config   cfg;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	if (cmd == SIOCSHWTSTAMP) {
+		CNTR_INC(priv, ts_ioctl);
+
+		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+			return -EFAULT;
+
+		QFEC_LOG(QFEC_LOG_DBG,
+			"%s: SIOCSHWTSTAMP - %x flags  %x tx  %x rx\n",
+			__func__, cfg.flags, cfg.tx_type, cfg.rx_filter);
+
+		/* report the modes actually applied */
+		cfg.flags      = 0;
+		cfg.tx_type    = HWTSTAMP_TX_ON;
+		cfg.rx_filter  = HWTSTAMP_FILTER_ALL;
+
+		priv->state |= timestamping;
+		qfec_reg_write(priv, TS_CTL_REG,
+			qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL);
+
+		if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
+}
+
+/* ndo_get_stats: return the software counters maintained by the
+ * RX/TX paths (rx_missed_errors is accumulated in qfec_rx_int())
+ */
+static struct net_device_stats *qfec_get_stats(struct net_device *dev)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n");
+
+	return &priv->stats;
+}
+
+/*
+ * accept new mac address
+ */
+/*
+ * accept new mac address
+ *
+ * Fix: reject multicast and all-zero addresses; the old code
+ * programmed whatever user space supplied into the MAC registers.
+ */
+static int qfec_set_mac_address(struct net_device *dev, void *p)
+{
+	struct qfec_priv   *priv = netdev_priv(dev);
+	struct sockaddr    *addr = p;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	qfec_set_adr_regs(priv, dev->dev_addr);
+
+	return 0;
+}
+
+/*
+ *  read discontinuous MAC address from corrected fuse memory region
+ */
+
+/*
+ * read the (discontinuous) MAC address bytes from the corrected fuse
+ * region into buf; returns 0, -EINVAL on a bad length, or -ENODATA
+ * when the fuses are blank.
+ *
+ * Fix: bound nBytes by the offset table; the old code indexed past
+ * offset[] for any nBytes > 6.
+ */
+static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes)
+{
+	static int  offset[] = { 0, 1, 2, 3, 4, 8 };
+	int         n;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	if (nBytes < 0 || nBytes > (int)ARRAY_SIZE(offset))
+		return -EINVAL;
+
+	for (n = 0; n < nBytes; n++)
+		buf[n] = ioread8(mac_base + offset[n]);
+
+	/* check that MAC programmed (callers pass nBytes == 6, so all six
+	 * bytes summed here have been filled in above)
+	 */
+	if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0)  {
+		QFEC_LOG_ERR("%s: null MAC address\n", __func__);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+/*
+ * static definition of driver functions
+ */
+/* net_device operations; open/stop allocate and release the descriptor
+ * rings, IRQ and PHY-monitor timer
+ */
+static const struct net_device_ops qfec_netdev_ops = {
+	.ndo_open               = qfec_open,
+	.ndo_stop               = qfec_stop,
+	.ndo_start_xmit         = qfec_xmit,
+
+	.ndo_do_ioctl           = qfec_do_ioctl,
+	.ndo_tx_timeout         = qfec_tx_timeout,
+	.ndo_set_mac_address    = qfec_set_mac_address,
+
+	.ndo_change_mtu         = eth_change_mtu,
+	.ndo_validate_addr      = eth_validate_addr,
+
+	.ndo_get_stats          = qfec_get_stats,
+	.ndo_set_config         = qfec_set_config,
+};
+
+/*
+ * ethtool functions
+ */
+
+/* ethtool: restart PHY autonegotiation */
+static int qfec_nway_reset(struct net_device *dev)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+	return mii_nway_restart(&priv->mii);
+}
+
+/*
+ * speed, duplex, auto-neg settings
+ */
+static void qfec_ethtool_getpauseparam(struct net_device *dev,
+			struct ethtool_pauseparam *pp)
+{
+	/* report the MAC's current flow-control state and whether any
+	 * pause capability is being advertised to the link partner
+	 */
+	struct qfec_priv  *priv = netdev_priv(dev);
+	u32                flow;
+	u32                adv;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	flow = qfec_reg_read(priv, FLOW_CONTROL_REG);
+	pp->tx_pause = !!(flow & FLOW_CONTROL_TFE);
+	pp->rx_pause = !!(flow & FLOW_CONTROL_RFE);
+
+	adv = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+	pp->autoneg = !!(adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM));
+}
+
+static int qfec_ethtool_setpauseparam(struct net_device *dev,
+			struct ethtool_pauseparam *pp)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+	u32                advert;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__,
+		pp->autoneg, pp->rx_pause, pp->tx_pause);
+
+	/* clear the current pause advertisement bits, then rebuild them */
+	advert  =  qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE);
+	advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+	/* If pause autonegotiation is enabled, but both rx and tx are not
+	 * because neither was specified in the ethtool cmd,
+	 * enable both symetrical and asymetrical pause.
+	 * otherwise, only enable the pause mode indicated by rx/tx.
+	 */
+	if (pp->autoneg)  {
+		if (pp->rx_pause)
+			advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP;
+		else if (pp->tx_pause)
+			advert |= ADVERTISE_PAUSE_ASYM;
+		else
+			advert |= ADVERTISE_PAUSE_CAP;
+	}
+
+	/* NOTE(review): the new advertisement takes effect only on the
+	 * next autonegotiation; no mii_nway_restart() here - confirm
+	 * intentional
+	 */
+	qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert);
+
+	return 0;
+}
+
+/*
+ * ethtool ring parameter (-g/G) support
+ */
+
+/*
+ * setringparamam - change the tx/rx ring lengths
+ */
+#define MIN_RING_SIZE	3
+#define MAX_RING_SIZE	1000
+/*
+ * ethtool -G: drain the TX ring, stop the device, apply clamped ring
+ * sizes, and restart.  Returns -ETIME if TX never drains, or the
+ * qfec_open() result (the old code ignored a failed restart and
+ * reported success).
+ */
+static int qfec_ethtool_setringparam(struct net_device *dev,
+	struct ethtool_ringparam *ring)
+{
+	struct qfec_priv  *priv    = netdev_priv(dev);
+	u32                timeout = 20;
+
+	/* notify stack the link is down */
+	netif_carrier_off(dev);
+
+	/* allow tx to complete & free skbufs on the tx ring */
+	do {
+		usleep_range(10000, 100000);
+		qfec_tx_replenish(dev);
+
+		if (timeout-- == 0)  {
+			QFEC_LOG_ERR("%s: timeout\n", __func__);
+			return -ETIME;
+		}
+	} while (!qfec_ring_empty(&priv->ring_tbd));
+
+
+	qfec_stop(dev);
+
+	/* set tx ring size (clamped to [MIN_RING_SIZE, MAX_RING_SIZE]) */
+	if (ring->tx_pending < MIN_RING_SIZE)
+		ring->tx_pending = MIN_RING_SIZE;
+	else if (ring->tx_pending > MAX_RING_SIZE)
+		ring->tx_pending = MAX_RING_SIZE;
+	priv->n_tbd = ring->tx_pending;
+
+	/* set rx ring size (clamped to [MIN_RING_SIZE, MAX_RING_SIZE]) */
+	if (ring->rx_pending < MIN_RING_SIZE)
+		ring->rx_pending = MIN_RING_SIZE;
+	else if (ring->rx_pending > MAX_RING_SIZE)
+		ring->rx_pending = MAX_RING_SIZE;
+	priv->n_rbd = ring->rx_pending;
+
+	/* restart with the new ring sizes; propagate any failure */
+	return qfec_open(dev);
+}
+
+/*
+ * getringparamam - returns local values
+ */
+static void qfec_ethtool_getringparam(struct net_device *dev,
+	struct ethtool_ringparam *ring)
+{
+	/* ethtool -g: report current and maximum ring depths */
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	/* mini/jumbo rings are not implemented */
+	ring->rx_mini_max_pending  = 0;
+	ring->rx_mini_pending      = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_jumbo_pending     = 0;
+
+	ring->rx_max_pending = MAX_RING_SIZE;
+	ring->tx_max_pending = MAX_RING_SIZE;
+	ring->rx_pending     = priv->n_rbd;
+	ring->tx_pending     = priv->n_tbd;
+}
+
+/*
+ * speed, duplex, auto-neg settings
+ */
+static int
+qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	/* ethtool get: MII state via the generic helper, plus the ring
+	 * depths reported as the per-interrupt packet limits
+	 */
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	cmd->maxtxpkt = priv->n_tbd;
+	cmd->maxrxpkt = priv->n_rbd;
+
+	return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+/* ethtool set: delegate speed/duplex/autoneg changes to the MII layer */
+static int
+qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct qfec_priv  *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+/*
+ * msg/debug level
+ */
+/* report the driver-wide debug mask (shared by all qfec devices) */
+static u32 qfec_ethtool_getmsglevel(struct net_device *dev)
+{
+	return qfec_debug;
+}
+
+/* XOR the supplied bits into the debug mask - deliberately a toggle,
+ * not an assignment, so repeated calls flip individual levels
+ */
+static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+	qfec_debug ^= level;	/* toggle on/off */
+}
+
+/*
+ * register dump
+ */
+/* layout of the ethtool register dump: each block has a destination
+ * offset within the dump buffer (_DMP_), a source register offset in
+ * the controller (_REG_), and a register count (_LEN)
+ */
+#define DMA_DMP_OFFSET  0x0000
+#define DMA_REG_OFFSET  0x1000
+#define DMA_REG_LEN     23
+
+#define MAC_DMP_OFFSET  0x0080
+#define MAC_REG_OFFSET  0x0000
+#define MAC_REG_LEN     55
+
+#define TS_DMP_OFFSET   0x0180
+#define TS_REG_OFFSET   0x0700
+#define TS_REG_LEN      15
+
+#define MDIO_DMP_OFFSET 0x0200
+#define MDIO_REG_LEN    16
+
+/* total dump size: MDIO (16-bit regs) is the last block */
+#define REG_SIZE    (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short)))
+
+static int qfec_ethtool_getregs_len(struct net_device *dev)
+{
+	return REG_SIZE;
+}
+
+static void
+qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
+			 void *buf)
+{
+	/* snapshot DMA, MAC, timestamp and MDIO registers into buf in
+	 * network byte order, per the layout defines above
+	 */
+	struct qfec_priv  *priv   = netdev_priv(dev);
+	u32               *data   = buf;
+	u16               *data16;
+	unsigned int       k;
+	unsigned int       off;
+	unsigned int       idx;
+
+	memset(buf, 0, REG_SIZE);
+
+	/* DMA block */
+	idx = DMA_DMP_OFFSET / sizeof(u32);
+	off = DMA_REG_OFFSET;
+	for (k = 0; k < DMA_REG_LEN; k++, off += sizeof(u32))
+		data[idx++] = htonl(qfec_reg_read(priv, off));
+
+	/* MAC block */
+	idx = MAC_DMP_OFFSET / sizeof(u32);
+	off = MAC_REG_OFFSET;
+	for (k = 0; k < MAC_REG_LEN; k++, off += sizeof(u32))
+		data[idx++] = htonl(qfec_reg_read(priv, off));
+
+	/* timestamp block */
+	idx = TS_DMP_OFFSET / sizeof(u32);
+	off = TS_REG_OFFSET;
+	for (k = 0; k < TS_REG_LEN; k++, off += sizeof(u32))
+		data[idx++] = htonl(qfec_reg_read(priv, off));
+
+	/* MDIO (PHY) registers are 16 bits wide */
+	data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)];
+	for (k = 0; k < MDIO_REG_LEN; k++)
+		data16[k] = htons(qfec_mdio_read(dev, 0, k));
+
+	regs->len     = REG_SIZE;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len);
+}
+
+/*
+ * statistics
+ *   return counts of various ethernet activity.
+ *   many of these are same as in struct net_device_stats
+ *
+ *   missed-frames indicates the number of attempts made by the ethernet
+ *      controller to write to a buffer-descriptor when the BD ownership
+ *      bit was not set.   The rxfifooverflow counter (0x1D4) is not
+ *      available.  The Missed Frame and Buffer Overflow Counter register
+ *      (0x1020) is used, but has only 16-bits and is reset when read.
+ *      It is read and updates the value in priv->stats.rx_missed_errors
+ *      in qfec_rx_int().
+ */
+/* labels for the HW statistics counters; entry order must match
+ * qfec_stats_regs below (one label per register index)
+ */
+static char qfec_stats_strings[][ETH_GSTRING_LEN] = {
+	"TX good/bad Bytes         ",
+	"TX Bytes                  ",
+	"TX good/bad Frames        ",
+	"TX Bcast Frames           ",
+	"TX Mcast Frames           ",
+	"TX Unicast Frames         ",
+	"TX Pause Frames           ",
+	"TX Vlan Frames            ",
+	"TX Frames 64              ",
+	"TX Frames 65-127          ",
+	"TX Frames 128-255         ",
+	"TX Frames 256-511         ",
+	"TX Frames 512-1023        ",
+	"TX Frames 1024+           ",
+	"TX Pause Frames           ",
+	"TX Collisions             ",
+	"TX Late Collisions        ",
+	"TX Excessive Collisions   ",
+
+	"RX good/bad Bytes         ",
+	"RX Bytes                  ",
+	"RX good/bad Frames        ",
+	"RX Bcast Frames           ",
+	"RX Mcast Frames           ",
+	"RX Unicast Frames         ",
+	"RX Pause Frames           ",
+	"RX Vlan Frames            ",
+	"RX Frames 64              ",
+	"RX Frames 65-127          ",
+	"RX Frames 128-255         ",
+	"RX Frames 256-511         ",
+	"RX Frames 512-1023        ",
+	"RX Frames 1024+           ",
+	"RX Pause Frames           ",
+	"RX Crc error Frames       ",
+	"RX Length error Frames    ",
+	"RX Alignment error Frames ",
+	"RX Runt Frames            ",
+	"RX Oversize Frames        ",
+	"RX Missed Frames          ",
+
+};
+
+/* word indices (offset / 4) of the HW statistics registers, in the
+ * same order as qfec_stats_strings above; read via
+ * qfec_reg_read(priv, index * sizeof(uint32_t))
+ */
+static u32 qfec_stats_regs[] =  {
+
+	     69,     89,     70,     71,     72,     90,     92,     93,
+	     73,     74,     75,     76,     77,     78,     92,     84,
+	     86,     87,
+
+	     97,     98,     96,     99,    100,    113,    116,    118,
+	    107,    108,    109,    110,    111,    112,    116,    101,
+	    114,    102,    103,    106
+};
+
+/*
+ * sysfs read: one line per HW statistics register.
+ *
+ * Fix: snprintf() returns the would-be length, so l could pass count
+ * and the next iteration would pass a negative (count - l), which
+ * wraps to a huge size_t.  Stop at the page boundary and clamp.
+ */
+static int qfec_stats_show(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct qfec_priv  *priv = netdev_priv(to_net_dev(dev));
+	int                count = PAGE_SIZE;
+	int                l     = 0;
+	int                n;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__);
+
+	for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)  {
+		if (l >= count)
+			break;	/* page full */
+
+		l += snprintf(&buf[l], count - l, "      %12u  %s\n",
+			qfec_reg_read(priv,
+				qfec_stats_regs[n] * sizeof(uint32_t)),
+			qfec_stats_strings[n]);
+	}
+
+	return l < count ? l : count;
+}
+
+static int qfec_get_sset_count(struct net_device *dev, int sset)
+{
+	/* HW stat registers plus the software missed-frames counter */
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(qfec_stats_regs) + 1;
+
+	return -EOPNOTSUPP;
+}
+
+/*
+ * ethtool string-set query.
+ *
+ * Fixes: honor the stringset argument (only ETH_SS_STATS is
+ * supported; the old code copied stats labels for any set), and use
+ * %zu for the size_t sizeof operand (%d is a format mismatch).
+ */
+static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset,
+		u8 *buf)
+{
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %zu bytes\n", __func__,
+		sizeof(qfec_stats_strings));
+
+	memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings));
+}
+
+/* ethtool -S: HW counters followed by the software missed-frames
+ * count; entry count matches qfec_get_sset_count()
+ */
+static void qfec_ethtool_getstats(struct net_device *dev,
+		struct ethtool_stats *stats, uint64_t *data)
+{
+	struct qfec_priv        *priv = netdev_priv(dev);
+	int                      j = 0;
+	int                      n;
+
+	for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++)
+		data[j++] = qfec_reg_read(priv,
+				qfec_stats_regs[n] * sizeof(uint32_t));
+
+	data[j++] = priv->stats.rx_missed_errors;
+
+	stats->n_stats = j;
+}
+
+static void qfec_ethtool_getdrvinfo(struct net_device *dev,
+					struct ethtool_drvinfo *info)
+{
+	/* identify driver, version and the underlying platform device */
+	strlcpy(info->driver,   QFEC_NAME,    sizeof(info->driver));
+	strlcpy(info->version,  QFEC_DRV_VER, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(dev->dev.parent),
+		sizeof(info->bus_info));
+
+	info->regdump_len = qfec_ethtool_getregs_len(dev);
+	info->eedump_len  = 0;	/* no EEPROM */
+}
+
+/*
+ * ethtool ops table
+ */
+/* ethtool operations; ring-size changes go through setringparam, which
+ * stops and restarts the interface
+ */
+static const struct ethtool_ops qfec_ethtool_ops = {
+	.nway_reset         = qfec_nway_reset,
+
+	.get_settings       = qfec_ethtool_getsettings,
+	.set_settings       = qfec_ethtool_setsettings,
+	.get_link           = ethtool_op_get_link,
+	.get_drvinfo        = qfec_ethtool_getdrvinfo,
+	.get_msglevel       = qfec_ethtool_getmsglevel,
+	.set_msglevel       = qfec_ethtool_setmsglevel,
+	.get_regs_len       = qfec_ethtool_getregs_len,
+	.get_regs           = qfec_ethtool_getregs,
+
+	.get_ringparam      = qfec_ethtool_getringparam,
+	.set_ringparam      = qfec_ethtool_setringparam,
+
+	.get_pauseparam     = qfec_ethtool_getpauseparam,
+	.set_pauseparam     = qfec_ethtool_setpauseparam,
+
+	.get_sset_count     = qfec_get_sset_count,
+	.get_strings        = qfec_ethtool_getstrings,
+	.get_ethtool_stats  = qfec_ethtool_getstats,
+};
+
+/*
+ *  create sysfs entries
+ */
+/* read-only debug attributes under the net device's sysfs directory */
+static DEVICE_ATTR(bd_tx,   0444, qfec_bd_tx_show,   NULL);
+static DEVICE_ATTR(bd_rx,   0444, qfec_bd_rx_show,   NULL);
+static DEVICE_ATTR(cfg,     0444, qfec_config_show,  NULL);
+static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL);
+static DEVICE_ATTR(cntrs,   0444, qfec_cntrs_show,   NULL);
+static DEVICE_ATTR(stats,   0444, qfec_stats_show,   NULL);
+static DEVICE_ATTR(reg,     0444, qfec_reg_show,     NULL);
+
+/* best-effort: files are debug-only, so a failure is logged but does
+ * not fail device registration
+ */
+static void qfec_sysfs_create(struct net_device *dev)
+{
+	if (device_create_file(&(dev->dev), &dev_attr_bd_tx) ||
+		device_create_file(&(dev->dev), &dev_attr_bd_rx) ||
+		device_create_file(&(dev->dev), &dev_attr_cfg) ||
+		device_create_file(&(dev->dev), &dev_attr_clk_reg) ||
+		device_create_file(&(dev->dev), &dev_attr_cntrs) ||
+		device_create_file(&(dev->dev), &dev_attr_reg) ||
+		device_create_file(&(dev->dev), &dev_attr_stats))
+		pr_err("qfec_sysfs_create failed to create sysfs files\n");
+}
+
+/*
+ * map a specified resource
+ */
+/*
+ * look up, reserve and ioremap one platform resource; on success
+ * *priv_res holds the platform resource and *addr the mapping.
+ *
+ * Fix: the old code overwrote 'res' with the request_mem_region()
+ * result and then dereferenced it in the failure log - a guaranteed
+ * NULL dereference on the error path.  Keep the platform resource in
+ * 'res' and only test the request result.
+ */
+static int qfec_map_resource(struct platform_device *plat, int resource,
+	struct resource **priv_res,
+	void                   **addr)
+{
+	struct resource         *res;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: 0x%x resource\n", __func__, resource);
+
+	/* allocate region to access controller registers */
+	*priv_res = res = platform_get_resource(plat, resource, 0);
+	if (!res) {
+		QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__);
+		return -ENODEV;
+	}
+
+	/* NOTE(review): res->end - res->start is one byte short of
+	 * resource_size(res); kept as-is because it must stay paired with
+	 * release_mem_region() in qfec_free_res() - fix both together
+	 */
+	if (!request_mem_region(res->start, res->end - res->start,
+			QFEC_NAME)) {
+		QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n",
+			__func__, res->start, res->end - res->start);
+		return -EBUSY;
+	}
+
+	*addr = ioremap(res->start, res->end - res->start);
+	if (!*addr)
+		return -ENOMEM;
+
+	QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n",
+		__func__, (void *)res->start, *addr);
+
+	return 0;
+}
+
+/*
+ * free allocated io regions
+ */
+/* undo qfec_map_resource(): unmap and release the reserved region */
+static void qfec_free_res(struct resource *res, void *base)
+{
+
+	if (res)  {
+		if (base)
+			iounmap((void __iomem *)base);
+
+		/* NOTE(review): res->end - res->start is one byte short of
+		 * resource_size(res); it matches the size used by
+		 * request_mem_region() in qfec_map_resource() - fix both
+		 * together
+		 */
+		release_mem_region(res->start, res->end - res->start);
+	}
+};
+
+/*
+ * probe function that obtain configuration info and allocate net_device
+ */
+/*
+ * probe: allocate the net_device, map register regions, read the
+ * fused MAC address and register the interface.
+ *
+ * Fixes: memset used sizeof(priv) - the size of the pointer, not the
+ * structure - so most of priv was never explicitly cleared; and the
+ * mdio/xmit spinlocks were initialized after register_netdev(), which
+ * exposes the device (open/xmit can run) before the locks are valid.
+ */
+static int __devinit qfec_probe(struct platform_device *plat)
+{
+	struct net_device  *dev;
+	struct qfec_priv   *priv;
+	int                 ret = 0;
+
+	/* allocate device */
+	dev = alloc_etherdev(sizeof(struct qfec_priv));
+	if (!dev) {
+		QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n",      __func__, (int)dev);
+
+	qfec_dev = dev;
+	SET_NETDEV_DEV(dev, &plat->dev);
+
+	dev->netdev_ops      = &qfec_netdev_ops;
+	dev->ethtool_ops     = &qfec_ethtool_ops;
+	dev->watchdog_timeo  = 2 * HZ;
+	dev->irq             = platform_get_irq(plat, 0);
+
+	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+	/* initialize private data (full structure, not pointer size) */
+	priv = (struct qfec_priv *)netdev_priv(dev);
+	memset((void *)priv, 0, sizeof(*priv));
+
+	priv->net_dev   = dev;
+	platform_set_drvdata(plat, dev);
+
+	priv->n_tbd     = TX_BD_NUM;
+	priv->n_rbd     = RX_BD_NUM;
+
+	/* locks must be ready before register_netdev() makes the device
+	 * visible to the stack
+	 */
+	spin_lock_init(&priv->mdio_lock);
+	spin_lock_init(&priv->xmit_lock);
+
+	/* initialize phy structure */
+	priv->mii.phy_id_mask   = 0x1F;
+	priv->mii.reg_num_mask  = 0x1F;
+	priv->mii.dev           = dev;
+	priv->mii.mdio_read     = qfec_mdio_read;
+	priv->mii.mdio_write    = qfec_mdio_write;
+
+	/* map register regions */
+	ret = qfec_map_resource(
+		plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__);
+		goto err1;
+	}
+
+	ret = qfec_map_resource(
+		plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__);
+		goto err2;
+	}
+
+	ret = qfec_map_resource(
+		plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__);
+		goto err3;
+	}
+
+	/* initialize MAC addr */
+	ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base,
+		MAC_ADDR_SIZE);
+	if (ret)
+		goto err4;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: mac  %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__func__,
+		dev->dev_addr[0], dev->dev_addr[1],
+		dev->dev_addr[2], dev->dev_addr[3],
+		dev->dev_addr[4], dev->dev_addr[5]);
+
+	ret = register_netdev(dev);
+	if (ret)  {
+		QFEC_LOG_ERR("%s: register_netdev failed\n", __func__);
+		goto err4;
+	}
+
+	qfec_sysfs_create(dev);
+
+	return 0;
+
+	/* error handling */
+err4:
+	qfec_free_res(priv->fuse_res, priv->fuse_base);
+err3:
+	qfec_free_res(priv->clk_res, priv->clk_base);
+err2:
+	qfec_free_res(priv->mac_res, priv->mac_base);
+err1:
+	free_netdev(dev);
+err:
+	QFEC_LOG_ERR("%s: err\n", __func__);
+	return ret;
+}
+
+/*
+ * module remove
+ */
+/*
+ * module remove.
+ *
+ * Fix: unregister the netdev (which brings the interface down via
+ * qfec_stop) BEFORE releasing the register mappings; the old order
+ * freed mappings a still-running interface could be using.
+ */
+static int __devexit qfec_remove(struct platform_device *plat)
+{
+	struct net_device  *dev  = platform_get_drvdata(plat);
+	struct qfec_priv   *priv = netdev_priv(dev);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	platform_set_drvdata(plat, NULL);
+
+	unregister_netdev(dev);
+
+	qfec_free_res(priv->fuse_res, priv->fuse_base);
+	qfec_free_res(priv->clk_res, priv->clk_base);
+	qfec_free_res(priv->mac_res, priv->mac_base);
+
+	free_netdev(dev);
+
+	return 0;
+}
+
+/*
+ * module support
+ *     the FSM9xxx is not a mobile device does not support power management
+ */
+
+/* platform driver binding; no PM callbacks (see comment above) */
+static struct platform_driver qfec_driver = {
+	.probe  = qfec_probe,
+	.remove = __devexit_p(qfec_remove),
+	.driver = {
+		.name   = QFEC_NAME,
+		.owner  = THIS_MODULE,
+	},
+};
+
+/*
+ * module init
+ */
+static int __init qfec_init_module(void)
+{
+	/* register the platform driver and report the result */
+	int  rc;
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name);
+
+	rc = platform_driver_register(&qfec_driver);
+
+	QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n",
+		__func__, rc);
+
+	return rc;
+}
+
+/*
+ * module exit
+ */
+/* module unload: unbind the platform driver */
+static void __exit qfec_exit_module(void)
+{
+	QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__);
+
+	platform_driver_unregister(&qfec_driver);
+}
+
+MODULE_DESCRIPTION("FSM Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_VERSION("1.0");
+
+module_init(qfec_init_module);
+module_exit(qfec_exit_module);
diff --git a/drivers/net/qfec.h b/drivers/net/qfec.h
new file mode 100644
index 0000000..6328804
--- /dev/null
+++ b/drivers/net/qfec.h
@@ -0,0 +1,793 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* qualcomm fast Ethernet controller HW description */
+
+#ifndef _QFEC_EMAC_H_
+# define _QFEC_EMAC_H_
+
+# ifndef __KERNEL__
+#   include "stdint.h"
+# endif
+
+/* build an nBits-wide mask at bit position pos; arguments are
+ * parenthesized so expression arguments expand correctly
+ */
+# define MskBits(nBits, pos)     ((((1) << (nBits)) - 1) << (pos))
+
+/* Rx/Tx Ethernet Buffer Descriptors
+ *     status contains the ownership, status and receive length bits
+ *     ctl    contains control and size bits for two buffers
+ *     p_buf  contains a ptr to the data buffer
+ *            MAC writes timestamp low into p_buf
+ *     next   contains either ptr to 2nd buffer or next buffer-desc
+ *            MAC writes timestamp high into next
+ *
+ *     status/ctl bit definition depend on RX or TX usage
+ */
+
+
+/* DMA buffer descriptor; layout is fixed by the MAC hardware */
+struct qfec_buf_desc {
+	uint32_t            status;   /* ownership, status, rx frame length */
+	uint32_t            ctl;      /* control + sizes for two buffers */
+	void               *p_buf;    /* data buffer ptr; MAC writes ts-low */
+	void               *next;     /* 2nd buffer or next desc; MAC ts-high */
+};
+
+/* ownership bit operations */
+# define BUF_OWN                     0x80000000 /* DMA owns buffer */
+# define BUF_OWN_DMA                 BUF_OWN
+
+/* RX buffer status bits */
+# define BUF_RX_AFM               0x40000000 /* dest addr filt fail */
+
+# define BUF_RX_FL                0x3fff0000 /* frame length */
+# define BUF_RX_FL_GET(p)         ((p.status & BUF_RX_FL) >> 16)
+# define BUF_RX_FL_SET(p, x) \
+	(p.status = (p.status & ~BUF_RX_FL) | ((x << 16) & BUF_RX_FL))
+# define BUF_RX_FL_GET_FROM_STATUS(status) \
+				  (((status) & BUF_RX_FL) >> 16)
+
+# define BUF_RX_ES                0x00008000 /* error summary */
+# define BUF_RX_DE                0x00004000 /* error descriptor (es) */
+# define BUF_RX_SAF               0x00002000 /* source addr filt fail */
+# define BUF_RX_LE                0x00001000 /* length error */
+
+# define BUF_RX_OE                0x00000800 /* overflow error (es) */
+# define BUF_RX_VLAN              0x00000400 /* vlan tag */
+# define BUF_RX_FS                0x00000200 /* first descriptor */
+# define BUF_RX_LS                0x00000100 /* last  descriptor */
+
+# define BUF_RX_IPC               0x00000080 /* cksum-err/giant-frame (es) */
+# define BUF_RX_LC                0x00000040 /* late collision (es) */
+# define BUF_RX_FT                0x00000020 /* frame type */
+# define BUF_RX_RWT               0x00000010 /* rec watchdog timeout (es) */
+
+# define BUF_RX_RE                0x00000008 /* rec error (es) */
+# define BUF_RX_DBE               0x00000004 /* dribble bit err */
+# define BUF_RX_CE                0x00000002 /* crc err (es) */
+# define BUF_RX_CSE               0x00000001 /* checksum err */
+
+# define BUF_RX_ERRORS  \
+	(BUF_RX_DE  | BUF_RX_SAF | BUF_RX_LE  | BUF_RX_OE \
+	| BUF_RX_IPC | BUF_RX_LC  | BUF_RX_RWT | BUF_RX_RE \
+	| BUF_RX_DBE | BUF_RX_CE  | BUF_RX_CSE)
+
+/* RX buffer control bits */
+# define BUF_RX_DI                0x80000000 /* disable intrp on compl */
+# define BUF_RX_RER               0x02000000 /* rec end of ring */
+# define BUF_RX_RCH               0x01000000 /* 2nd addr chained */
+
+# define BUF_RX_SIZ2              0x003ff800 /* buffer 2 size */
+/* struct qfec_buf_desc has no 'control' member; the field is 'ctl' */
+# define BUF_RX_SIZ2_GET(p)       ((p.ctl&BUF_RX_SIZ2) >> 11)
+
+# define BUF_RX_SIZ               0x000007ff /* rx buf 1 size */
+# define BUF_RX_SIZ_GET(p)        (p.ctl&BUF_RX_SIZ)
+
+/* TX buffer status bits */
+# define BUF_TX_TTSS              0x00020000 /* time stamp status */
+# define BUF_TX_IHE               0x00010000 /* IP hdr err */
+
+# define BUF_TX_ES                0x00008000 /* error summary */
+# define BUF_TX_JT                0x00004000 /* jabber timeout (es) */
+# define BUF_TX_FF                0x00002000 /* frame flushed (es) */
+# define BUF_TX_PCE               0x00001000 /* payld cksum err */
+
+# define BUF_TX_LOC               0x00000800 /* loss carrier (es) */
+# define BUF_TX_NC                0x00000400 /* no carrier (es) */
+# define BUF_TX_LC                0x00000200 /* late collision (es) */
+# define BUF_TX_EC                0x00000100 /* excessive collision (es) */
+
+# define BUF_TX_VLAN              0x00000080 /* VLAN frame */
+# define BUF_TX_CC                MskBits(4, 3) /* collision count */
+# define BUF_TX_CC_GET(p)         ((p.status&BUF_TX_CC)>>3)
+
+# define BUF_TX_ED                0x00000004 /* excessive deferral (es) */
+# define BUF_TX_UF                0x00000002 /* underflow err (es) */
+# define BUF_TX_DB                0x00000001 /* deferred bit */
+
+/* TX buffer control bits */
+# define BUF_TX_IC                0x80000000 /* intrpt on compl */
+# define BUF_TX_LS                0x40000000 /* last segment */
+# define BUF_TX_FS                0x20000000 /* first segment */
+# define BUF_TX_CIC               0x18000000 /* cksum insert control */
+# define BUF_TX_CIC_SET(n)        (BUF_TX_CIC&(n<<27))
+
+# define BUF_TX_DC                0x04000000 /* disable CRC */
+# define BUF_TX_TER               0x02000000 /* end of ring */
+# define BUF_TX_TCH               0x01000000 /* 2nd addr chained */
+
+# define BUF_TX_DP                0x00800000 /* disable padding */
+# define BUF_TX_TTSE              0x00400000 /* timestamp enable */
+
+# define BUF_TX_SIZ2              0x003ff800 /* buffer 2 size */
+# define BUF_TX_SIZ2_SET(n)       (BUF_TX_SIZ2 & ((n) << 11))
+
+# define BUF_TX_SIZ               0x000007ff /* buffer 1 size */
+# define BUF_TX_SIZ_SET(n)        (BUF_TX_SIZ & (n))
+
+
+/* Ethernet Controller Registers */
+# define BUS_MODE_REG             0x1000
+
+# define BUS_MODE_MB              0x04000000  /* mixed burst */
+# define BUS_MODE_AAL             0x02000000  /* address alignment beats */
+# define BUS_MODE_8XPBL           0x01000000  /*  */
+
+# define BUS_MODE_USP             0x00800000  /* use separate PBL */
+# define BUS_MODE_RPBL            0x007e0000  /* rxDMA PBL */
+# define BUS_MODE_FB              0x00010000  /* fixed burst */
+
+# define BUS_MODE_PR              0x0000c000  /* tx/rx priority */
+# define BUS_MODE_PR4             0x0000c000  /* tx/rx priority 4:1 */
+# define BUS_MODE_PR3             0x00008000  /* tx/rx priority 3:1 */
+# define BUS_MODE_PR2             0x00004000  /* tx/rx priority 2:1 */
+# define BUS_MODE_PR1             0x00000000  /* tx/rx priority 1:1 */
+
+# define BUS_MODE_PBL             0x00003f00  /* programmable burst length */
+# define BUS_MODE_PBLSET(n)       (BUS_MODE_PBL&(n<<8))
+
+# define BUS_MODE_DSL             0x0000007c  /* descriptor skip length */
+# define BUS_MODE_DSL_SET(n)      (BUS_MODE_DSL & (n << 2))
+
+# define BUS_MODE_DA              0x00000002  /* DMA arbitration scheme  */
+# define BUS_MODE_SWR             0x00000001  /* software reset */
+
+#define BUS_MODE_REG_DEFAULT     (BUS_MODE_FB \
+				| BUS_MODE_AAL \
+				| BUS_MODE_PBLSET(16) \
+				| BUS_MODE_DA \
+				| BUS_MODE_DSL_SET(0))
+
+# define TX_POLL_DEM_REG          0x1004      /* transmit poll demand */
+# define RX_POLL_DEM_REG          0x1008      /* receive poll demand */
+
+# define RX_DES_LST_ADR_REG       0x100c      /* receive buffer descriptor */
+# define TX_DES_LST_ADR_REG       0x1010      /* transmit buffer descriptor */
+
+# define STATUS_REG               0x1014
+
+# define STATUS_REG_RSVRD_1       0xc0000000  /* reserved */
+# define STATUS_REG_TTI           0x20000000  /* time-stamp trigger intrpt */
+# define STATUS_REG_GPI           0x10000000  /* gmac PMT interrupt */
+
+# define STATUS_REG_GMI           0x08000000  /* gmac MMC interrupt */
+# define STATUS_REG_GLI           0x04000000  /* gmac line interface intrpt */
+
+# define STATUS_REG_EB            0x03800000  /* error bits */
+# define STATUS_REG_EB_DATA       0x00800000  /* error during data transfer */
+# define STATUS_REG_EB_RDWR       0x01000000  /* error during rd/wr transfer */
+# define STATUS_REG_EB_DESC       0x02000000  /* error during desc access */
+
+# define STATUS_REG_TS            0x00700000  /* transmit process state */
+
+# define STATUS_REG_TS_STOP       0x00000000  /*   stopped */
+# define STATUS_REG_TS_FETCH_DESC 0x00100000  /*   fetching descriptor */
+# define STATUS_REG_TS_WAIT       0x00200000  /*   waiting for status */
+# define STATUS_REG_TS_READ       0x00300000  /*   reading host memory */
+# define STATUS_REG_TS_TIMESTAMP  0x00400000  /*   timestamp write status */
+# define STATUS_REG_TS_RSVRD      0x00500000  /*   reserved */
+# define STATUS_REG_TS_SUSPEND    0x00600000  /*   desc-unavail/buffer-unflw */
+# define STATUS_REG_TS_CLOSE      0x00700000  /*   closing desc */
+
+# define STATUS_REG_RS            0x000e0000  /* receive process state */
+
+# define STATUS_REG_RS_STOP       0x00000000  /*   stopped */
+# define STATUS_REG_RS_FETCH_DESC 0x00020000  /*   fetching descriptor */
+# define STATUS_REG_RS_RSVRD_1    0x00040000  /*   reserved */
+# define STATUS_REG_RS_WAIT       0x00060000  /*   waiting for packet */
+# define STATUS_REG_RS_SUSPEND    0x00080000  /*   desc unavail */
+# define STATUS_REG_RS_CLOSE      0x000a0000  /*   closing desc */
+# define STATUS_REG_RS_TIMESTAMP  0x000c0000  /*   timestamp write status */
+# define STATUS_REG_RS_RSVRD_2    0x000e0000  /*   writing host memory */
+
+# define STATUS_REG_NIS           0x00010000  /* normal intrpt   14|6|2|0 */
+# define STATUS_REG_AIS           0x00008000  /* intrpts 13|10|9|8|7|5|4|3|1 */
+
+# define STATUS_REG_ERI           0x00004000  /* early receive interrupt */
+# define STATUS_REG_FBI           0x00002000  /* fatal bus error interrupt */
+# define STATUS_REG_RSVRD_2       0x00001800  /* reserved */
+
+# define STATUS_REG_ETI           0x00000400  /* early transmit interrupt */
+# define STATUS_REG_RWT           0x00000200  /* receive watchdog timeout */
+# define STATUS_REG_RPS           0x00000100  /* receive process stopped */
+
+# define STATUS_REG_RU            0x00000080  /* receive buffer unavailable */
+# define STATUS_REG_RI            0x00000040  /* receive interrupt */
+# define STATUS_REG_UNF           0x00000020  /* transmit underflow */
+# define STATUS_REG_OVF           0x00000010  /* receive overflow */
+
+# define STATUS_REG_TJT           0x00000008  /* transmit jabber timeout */
+# define STATUS_REG_TU            0x00000004  /* transmit buffer unavailable */
+# define STATUS_REG_TPS           0x00000002  /* transmit process stopped */
+# define STATUS_REG_TI            0x00000001  /* transmit interrupt */
+
+# define STATUS_REG_AIS_BITS    (STATUS_REG_FBI | STATUS_REG_ETI \
+				| STATUS_REG_RWT | STATUS_REG_RPS \
+				| STATUS_REG_RU | STATUS_REG_UNF \
+				| STATUS_REG_OVF | STATUS_REG_TJT \
+				| STATUS_REG_TPS | STATUS_REG_AIS)
+
+# define OPER_MODE_REG             0x1018
+
+# define OPER_MODE_REG_DT          0x04000000 /* disab drop ip cksum err fr */
+# define OPER_MODE_REG_RSF         0x02000000 /* rec store and forward */
+# define OPER_MODE_REG_DFF         0x01000000 /* disable flush of rec frames */
+
+# define OPER_MODE_REG_RFA2        0x00800000 /* thresh MSB for act flow-ctl */
+# define OPER_MODE_REG_RFD2        0x00400000 /* thresh MSB deAct flow-ctl */
+# define OPER_MODE_REG_TSF         0x00200000 /* tx store and forward */
+# define OPER_MODE_REG_FTF         0x00100000 /* flush tx FIFO */
+
+# define OPER_MODE_REG_RSVD1       0x000e0000 /* reserved */
+# define OPER_MODE_REG_TTC         0x0001c000 /* transmit threshold control */
+# define OPER_MODE_REG_TTC_SET(x)  (OPER_MODE_REG_TTC & (x << 14))
+# define OPER_MODE_REG_ST          0x00002000 /* start/stop transmission cmd */
+
+# define OPER_MODE_REG_RFD         0x00001800 /* thresh for deAct flow-ctl */
+# define OPER_MODE_REG_RFA         0x00000600 /* threshold for act flow-ctl */
+# define OPER_MODE_REG_EFC         0x00000100 /* enable HW flow-ctl */
+
+# define OPER_MODE_REG_FEF         0x00000080 /* forward error frames */
+# define OPER_MODE_REG_FUF         0x00000040 /* forward undersize good fr */
+# define OPER_MODE_REG_RSVD2       0x00000020 /* reserved */
+# define OPER_MODE_REG_RTC         0x00000018 /* receive threshold control */
+# define OPER_MODE_REG_RTC_SET(x)  (OPER_MODE_REG_RTC & (x << 3))
+
+# define OPER_MODE_REG_OSF         0x00000004 /* operate on second frame */
+# define OPER_MODE_REG_SR          0x00000002 /* start/stop receive */
+# define OPER_MODE_REG_RSVD3       0x00000001 /* reserved */
+
+
+#define OPER_MODE_REG_DEFAULT    (OPER_MODE_REG_RSF \
+				| OPER_MODE_REG_TSF \
+				| OPER_MODE_REG_TTC_SET(5) \
+				| OPER_MODE_REG_RTC_SET(1) \
+				| OPER_MODE_REG_OSF)
+
+# define INTRP_EN_REG              0x101c
+
+# define INTRP_EN_REG_RSVD1        0xfffc0000 /* */
+# define INTRP_EN_REG_NIE          0x00010000 /* normal intrpt summ enable */
+
+# define INTRP_EN_REG_AIE          0x00008000 /* abnormal intrpt summary en */
+# define INTRP_EN_REG_ERE          0x00004000 /* early receive intrpt enable */
+# define INTRP_EN_REG_FBE          0x00002000 /* fatal bus error enable */
+
+# define INTRP_EN_REG_RSVD2        0x00001800 /* */
+
+# define INTRP_EN_REG_ETE          0x00000400 /* early tx intrpt enable */
+# define INTRP_EN_REG_RWE          0x00000200 /* rx watchdog timeout enable */
+# define INTRP_EN_REG_RSE          0x00000100 /* rx stopped enable */
+
+# define INTRP_EN_REG_RUE          0x00000080 /* rx buf unavailable enable */
+# define INTRP_EN_REG_RIE          0x00000040 /* rx interrupt enable */
+# define INTRP_EN_REG_UNE          0x00000020 /* underflow interrupt enable */
+# define INTRP_EN_REG_OVE          0x00000010 /* overflow interrupt enable */
+
+# define INTRP_EN_REG_TJE          0x00000008 /* tx jabber timeout enable */
+# define INTRP_EN_REG_TUE          0x00000004 /* tx buf unavailable enable */
+# define INTRP_EN_REG_TSE          0x00000002 /* tx stopped enable */
+# define INTRP_EN_REG_TIE          0x00000001 /* tx interrupt enable */
+
+# define INTRP_EN_REG_All          (~(INTRP_EN_REG_RSVD1))
+
+# define MIS_FR_REG                0x1020
+
+# define MIS_FR_REG_FIFO_OVFL      0x10000000  /* fifo overflow */
+# define MIS_FR_REG_FIFO_CNT       0x0FFE0000  /* fifo cnt */
+
+# define MIS_FR_REG_MISS_OVFL      0x00010000  /* missed-frame overflow */
+# define MIS_FR_REG_MISS_CNT       0x0000FFFF  /* missed-frame cnt */
+
+# define RX_INTRP_WTCHDOG_REG      0x1024
+# define AXI_BUS_MODE_REG          0x1028
+
+# define AXI_BUS_MODE_EN_LPI       0x80000000  /* enable low power interface */
+# define AXI_BUS_MODE_UNLK_MGC_PKT 0x40000000  /* unlock-magic-pkt/rem-wk-up */
+# define AXI_BUS_MODE_WR_OSR_LMT   0x00F00000  /* max wr out stndg req limit */
+# define AXI_BUS_MODE_RD_OSR_LMT   0x000F0000  /* max rd out stndg req limit */
+# define AXI_BUS_MODE_AXI_AAL      0x00001000  /* address aligned beats */
+# define AXI_BUS_MODE_BLEN256      0x00000080  /* axi burst length 256 */
+# define AXI_BUS_MODE_BLEN128      0x00000040  /* axi burst length 128 */
+# define AXI_BUS_MODE_BLEN64       0x00000020  /* axi burst length 64  */
+# define AXI_BUS_MODE_BLEN32       0x00000010  /* axi burst length 32  */
+# define AXI_BUS_MODE_BLEN16       0x00000008  /* axi burst length 16  */
+# define AXI_BUS_MODE_BLEN8        0x00000004  /* axi burst length 8   */
+# define AXI_BUS_MODE_BLEN4        0x00000002  /* axi burst length 4   */
+# define AXI_BUS_MODE_UNDEF        0x00000001  /* axi undef burst length */
+
+#define AXI_BUS_MODE_DEFAULT     (AXI_BUS_MODE_WR_OSR_LMT \
+				| AXI_BUS_MODE_RD_OSR_LMT \
+				| AXI_BUS_MODE_BLEN16 \
+				| AXI_BUS_MODE_BLEN8 \
+				| AXI_BUS_MODE_BLEN4)
+
+# define AXI_STATUS_REG            0x102c
+
+/*     0x1030-0x1044 reserved */
+# define CUR_HOST_TX_DES_REG       0x1048
+# define CUR_HOST_RX_DES_REG       0x104c
+# define CUR_HOST_TX_BU_ADR_REG    0x1050
+# define CUR_HOST_RX_BU_ADR_REG    0x1054
+
+# define HW_FEATURE_REG            0x1058
+
+# define MAC_CONFIG_REG            0x0000
+
+# define MAC_CONFIG_REG_RSVD1      0xf8000000 /* */
+
+# define MAC_CONFIG_REG_SFTERR     0x04000000 /* smii force tx error */
+# define MAC_CONFIG_REG_CST        0x02000000 /* crc strip for type frame */
+# define MAC_CONFIG_REG_TC         0x01000000 /* tx cfg in rgmii/sgmii/smii */
+
+# define MAC_CONFIG_REG_WD         0x00800000 /* watchdog disable */
+# define MAC_CONFIG_REG_JD         0x00400000 /* jabber disable */
+# define MAC_CONFIG_REG_BE         0x00200000 /* frame burst enable */
+# define MAC_CONFIG_REG_JE         0x00100000 /* jumbo frame enable */
+
+# define MAC_CONFIG_REG_IFG        0x000e0000 /* inter frame gap, 96-(8*n) */
+# define MAC_CONFIG_REG_DCRS       0x00010000 /* dis carrier sense during tx */
+
+# define MAC_CONFIG_REG_PS         0x00008000 /* port select: 0/1 g/(10/100) */
+# define MAC_CONFIG_REG_FES        0x00004000 /* speed 100 mbps */
+# define MAC_CONFIG_REG_SPD        (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES)
+# define MAC_CONFIG_REG_SPD_1G     (0)
+# define MAC_CONFIG_REG_SPD_100    (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES)
+# define MAC_CONFIG_REG_SPD_10     (MAC_CONFIG_REG_PS)
+/* mask with the defined PS|FES field (MAC_CONFIG_REG_SPD);
+ * MAC_CONFIG_REG_PS_FES was never defined
+ */
+# define MAC_CONFIG_REG_SPD_SET(x) (MAC_CONFIG_REG_SPD & ((x) << 14))
+
+# define MAC_CONFIG_REG_DO         0x00002000 /* disable receive own */
+# define MAC_CONFIG_REG_LM         0x00001000 /* loopback mode */
+
+# define MAC_CONFIG_REG_DM         0x00000800 /* (full) duplex mode */
+# define MAC_CONFIG_REG_IPC        0x00000400 /* checksum offload */
+# define MAC_CONFIG_REG_DR         0x00000200 /* disable retry */
+# define MAC_CONFIG_REG_LUD        0x00000100 /* link up/down */
+
+# define MAC_CONFIG_REG_ACS        0x00000080 /* auto pad/crc stripping */
+# define MAC_CONFIG_REG_BL         0x00000060 /* back-off limit */
+# define MAC_CONFIG_REG_BL_10      0x00000000 /*          10 */
+# define MAC_CONFIG_REG_BL_8       0x00000020 /*          8  */
+# define MAC_CONFIG_REG_BL_4       0x00000040 /*          4  */
+# define MAC_CONFIG_REG_BL_1       0x00000060 /*          1  */
+# define MAC_CONFIG_REG_DC         0x00000010 /* deferral check */
+
+# define MAC_CONFIG_REG_TE         0x00000008 /* transmitter enable */
+# define MAC_CONFIG_REG_RE         0x00000004 /* receiver enable */
+# define MAC_CONFIG_REG_RSVD2      0x00000003 /* */
+
+# define MAC_FR_FILTER_REG         0x0004
+
+# define MAC_FR_FILTER_RA          0x80000000 /* receive all */
+
+# define MAC_FR_FILTER_HPF         0x00000400 /* hash or perfect filter */
+# define MAC_FR_FILTER_SAF         0x00000200 /* source addr filt en */
+# define MAC_FR_FILTER_SAIF        0x00000100 /* SA inverse filter */
+# define MAC_FR_FILTER_PCF_MASK    0x000000c0 /* pass control frames */
+# define MAC_FR_FILTER_PCF_0       0x00000000 /*    */
+# define MAC_FR_FILTER_PCF_1       0x00000040 /*    */
+# define MAC_FR_FILTER_PCF_2       0x00000080 /*    */
+# define MAC_FR_FILTER_PCF_3       0x000000c0 /*    */
+# define MAC_FR_FILTER_DBF         0x00000020 /* disable broadcast frames */
+# define MAC_FR_FILTER_PM          0x00000010 /* pass all multicast */
+# define MAC_FR_FILTER_DAIF        0x00000008 /* DA inverse filtering */
+# define MAC_FR_FILTER_HMC         0x00000004 /* hash multicast */
+# define MAC_FR_FILTER_HUC         0x00000002 /* hash unicast */
+# define MAC_FR_FILTER_PR          0x00000001 /* promiscuous mode */
+
+# define HASH_TABLE_HIGH_REG       0x0008
+# define HASH_TABLE_LOW_REG        0x000c
+
+# define GMII_ADR_REG              0x0010
+
+# define GMII_ADR_REG_PA           0x0000f800 /* addr bits */
+# define GMII_ADR_REG_GR           0x000007c0 /* addr bits */
+# define GMII_ADR_REG_RSVRD1       0x00000020 /* */
+# define GMII_ADR_REG_CR           0x0000001c /* csr clock range */
+# define GMII_ADR_REG_GW           0x00000002 /* gmii write */
+# define GMII_ADR_REG_GB           0x00000001 /* gmii busy */
+
+# define GMII_ADR_REG_ADR_SET(x)    (GMII_ADR_REG_PA & ((x) << 11))
+# define GMII_ADR_REG_ADR_GET(x)    (((x) & GMII_ADR_REG_PA) >> 11)
+
+/* REG_GET/CSR_GET previously had an unmatched '(' which broke any use */
+# define GMII_ADR_REG_REG_SET(x)    (GMII_ADR_REG_GR & ((x) << 6))
+# define GMII_ADR_REG_REG_GET(x)    (((x) & GMII_ADR_REG_GR) >> 6)
+
+# define GMII_ADR_REG_CSR_SET(x)    (GMII_ADR_REG_CR & ((x) << 2))
+# define GMII_ADR_REG_CSR_GET(x)    (((x) & GMII_ADR_REG_CR) >> 2)
+
+# define GMII_DATA_REG             0x0014
+
+# define GMII_DATA_REG_DATA        0x0000ffff /* gmii data */
+
+# define FLOW_CONTROL_REG          0x0018
+
+# define FLOW_CONTROL_PT           0xFFFF0000 /* pause time */
+# define FLOW_CONTROL_DZPQ         0x00000080 /* disable zero-quanta pause */
+# define FLOW_CONTROL_PLT          0x00000030 /* pause level threshold */
+
+# define FLOW_CONTROL_UP           0x00000008 /* unicast pause frame detect */
+# define FLOW_CONTROL_RFE          0x00000004 /* receive flow control enable */
+# define FLOW_CONTROL_TFE          0x00000002 /* transmit flow control enable */
+# define FLOW_CONTROL_FCB          0x00000001 /* flow control busy (BPA) */
+
+# define VLAN_TAG_REG              0x001c
+
+# define VERSION_REG               0x0020
+
+/* don't define these until HW if finished */
+/* # define VERSION_USER              0x10 */
+/* # define VERSION_QFEC              0x36 */
+
+# define VERSION_REG_USER(x)       (0xFF & (x >> 8))
+# define VERSION_REG_QFEC(x)       (0xFF & x)
+
+# define DEBUG_REG                 0x0024
+
+# define DEBUG_REG_RSVD1           0xfc000000 /* */
+# define DEBUG_REG_TX_FIFO_FULL    0x02000000 /* Tx fifo full */
+# define DEBUG_REG_TX_FIFO_NEMP    0x01000000 /* Tx fifo not empty */
+
+# define DEBUG_REG_RSVD2           0x00800000 /* */
+# define DEBUG_REG_TX_WR_ACTIVE    0x00400000 /* Tx fifo write ctrl active */
+
+# define DEBUG_REG_TX_RD_STATE     0x00300000 /* Tx fifo rd ctrl state */
+# define DEBUG_REG_TX_RD_IDLE      0x00000000 /*         idle */
+# define DEBUG_REG_TX_RD_WAIT      0x00100000 /*         waiting for status */
+# define DEBUG_REG_TX_RD_PASUE     0x00200000 /*         generating pause */
+# define DEBUG_REG_TX_RD_WRTG      0x00300000 /*         wr stat flush fifo */
+
+# define DEBUG_REG_TX_PAUSE        0x00080000 /* Tx in pause condition */
+
+# define DEBUG_REG_TX_CTRL_STATE   0x00060000 /* Tx frame controller state */
+/* NOTE(review): 0x00090000 lies outside the 0x00060000 field mask above;
+ * the idle encoding is presumably 0x00000000 — confirm against the MAC
+ * datasheet before relying on this value
+ */
+# define DEBUG_REG_TX_CTRL_IDLE    0x00090000 /*         idle */
+# define DEBUG_REG_TX_CTRL_WAIT    0x00020000 /*         waiting for status*/
+# define DEBUG_REG_TX_CTRL_PAUSE   0x00040000 /*         generating pause */
+# define DEBUG_REG_TX_CTRL_XFER    0x00060000 /*         transferring input */
+
+# define DEBUG_REG_TX_ACTIVE       0x00010000 /* Tx actively transmitting */
+# define DEBUG_REG_RSVD3           0x0000fc00 /* */
+
+# define DEBUG_REG_RX_STATE        0x00000300 /* Rx fifo state */
+# define DEBUG_REG_RX_EMPTY        0x00000000 /*         empty */
+# define DEBUG_REG_RX_LOW          0x00000100 /*         below threshold */
+# define DEBUG_REG_RX_HIGH         0x00000200 /*         above threshold */
+# define DEBUG_REG_RX_FULL         0x00000300 /*         full */
+
+# define DEBUG_REG_RSVD4           0x00000080 /* */
+
+# define DEBUG_REG_RX_RD_STATE     0x00000060 /* Rx rd ctrl state */
+# define DEBUG_REG_RX_RD_IDLE      0x00000000 /*         idle */
+# define DEBUG_REG_RX_RD_RDG_FR    0x00000020 /*         reading frame data */
+# define DEBUG_REG_RX_RD_RDG_STA   0x00000040 /*         reading status */
+# define DEBUG_REG_RX_RD_FLUSH     0x00000060 /*         flush fr data/stat */
+
+# define DEBUG_REG_RX_ACTIVE       0x00000010 /* Rx wr ctlr active */
+
+# define DEBUG_REG_RSVD5           0x00000008 /* */
+# define DEBUG_REG_SM_FIFO_RW_STA  0x00000006 /* small fifo rd/wr state */
+# define DEBUG_REG_RX_RECVG        0x00000001 /* Rx actively receiving data */
+
+# define REM_WAKEUP_FR_REG         0x0028
+# define PMT_CTRL_STAT_REG         0x002c
+/*   0x0030-0x0034 reserved */
+
+# define INTRP_STATUS_REG          0x0038
+
+# define INTRP_STATUS_REG_RSVD1    0x0000fc00 /* */
+# define INTRP_STATUS_REG_TSI      0x00000200 /* time stamp int stat */
+# define INTRP_STATUS_REG_RSVD2    0x00000100 /* */
+
+# define INTRP_STATUS_REG_RCOI     0x00000080 /* rec checksum offload int */
+# define INTRP_STATUS_REG_TI       0x00000040 /* tx int stat */
+# define INTRP_STATUS_REG_RI       0x00000020 /* rx int stat */
+# define INTRP_STATUS_REG_NI       0x00000010 /* normal int summary */
+
+# define INTRP_STATUS_REG_PMTI     0x00000008 /* PMT int */
+# define INTRP_STATUS_REG_ANC      0x00000004 /* auto negotiation complete */
+# define INTRP_STATUS_REG_LSC      0x00000002 /* link status change */
+# define INTRP_STATUS_REG_MII      0x00000001 /* rgMii/sgMii int */
+
+# define INTRP_MASK_REG            0x003c
+
+# define INTRP_MASK_REG_RSVD1      0xfc00     /* */
+# define INTRP_MASK_REG_TSIM       0x0200     /* time stamp int mask */
+# define INTRP_MASK_REG_RSVD2      0x01f0     /* */
+
+# define INTRP_MASK_REG_PMTIM      0x0000     /* PMT int mask */
+# define INTRP_MASK_REG_ANCM       0x0000     /* auto negotiation compl mask */
+# define INTRP_MASK_REG_LSCM       0x0000     /* link status change mask */
+# define INTRP_MASK_REG_MIIM       0x0000     /* rgMii/sgMii int mask */
+
+# define MAC_ADR_0_HIGH_REG        0x0040
+# define MAC_ADR_0_LOW_REG         0x0044
+/* additional pairs of registers for MAC addresses 1-15 */
+
+# define AN_CONTROL_REG            0x00c0
+
+# define AN_CONTROL_REG_RSVRD1     0xfff80000 /* */
+# define AN_CONTROL_REG_SGM_RAL    0x00040000 /* sgmii ral control */
+# define AN_CONTROL_REG_LR         0x00020000 /* lock to reference */
+# define AN_CONTROL_REG_ECD        0x00010000 /* enable comma detect */
+
+# define AN_CONTROL_REG_RSVRD2     0x00008000 /* */
+# define AN_CONTROL_REG_ELE        0x00004000 /* external loopback enable */
+# define AN_CONTROL_REG_RSVRD3     0x00002000 /* */
+# define AN_CONTROL_REG_ANE        0x00001000 /* auto negotiation enable */
+
+# define AN_CONTROL_REG_RSRVD4     0x00000c00 /* */
+# define AN_CONTROL_REG_RAN        0x00000200 /* restart auto negotiation */
+# define AN_CONTROL_REG_RSVRD5     0x000001ff /* */
+
+# define AN_STATUS_REG             0x00c4
+
+# define AN_STATUS_REG_RSVRD1      0xfffffe00 /* */
+# define AN_STATUS_REG_ES          0x00000100 /* extended status */
+# define AN_STATUS_REG_RSVRD2      0x000000c0 /* */
+# define AN_STATUS_REG_ANC         0x00000020 /* auto-negotiation complete */
+# define AN_STATUS_REG_RSVRD3      0x00000010 /* */
+# define AN_STATUS_REG_ANA         0x00000008 /* auto-negotiation ability */
+# define AN_STATUS_REG_LS          0x00000004 /* link status */
+# define AN_STATUS_REG_RSVRD4      0x00000003 /* */
+
+# define AN_ADVERTISE_REG          0x00c8
+# define AN_LNK_PRTNR_ABIL_REG     0x00cc
+# define AN_EXPANDSION_REG         0x00d0
+# define TBI_EXT_STATUS_REG        0x00d4
+
+# define SG_RG_SMII_STATUS_REG     0x00d8
+
+# define LINK_STATUS_REG           0x00d8
+
+# define LINK_STATUS_REG_RSVRD1    0xffffffc0 /* */
+# define LINK_STATUS_REG_FCD       0x00000020 /* false carrier detect */
+# define LINK_STATUS_REG_JT        0x00000010 /* jabber timeout */
+# define LINK_STATUS_REG_UP        0x00000008 /* link status */
+
+# define LINK_STATUS_REG_SPD       0x00000006 /* link speed */
+# define LINK_STATUS_REG_SPD_2_5   0x00000000 /* 10M   2.5M * 4 */
+# define LINK_STATUS_REG_SPD_25    0x00000002 /* 100M   25M * 4 */
+# define LINK_STATUS_REG_SPD_125   0x00000004 /* 1G    125M * 8 */
+
+# define LINK_STATUS_REG_F_DUPLEX  0x00000001 /* full duplex */
+
+/*     0x00dc-0x00fc reserved */
+
+/* MMC Register Map is from     0x0100-0x02fc */
+# define MMC_CNTRL_REG             0x0100
+# define MMC_INTR_RX_REG           0x0104
+# define MMC_INTR_TX_REG           0x0108
+# define MMC_INTR_MASK_RX_REG      0x010C
+# define MMC_INTR_MASK_TX_REG      0x0110
+
+/*     0x0300-0x06fc reserved */
+
+/* precision time protocol   time stamp registers */
+
+# define TS_CTL_REG                 0x0700
+
+# define TS_CTL_ATSFC               0x00080000
+# define TS_CTL_TSENMAC             0x00040000
+
+# define TS_CTL_TSCLKTYPE           0x00030000
+# define TS_CTL_TSCLK_ORD           0x00000000
+# define TS_CTL_TSCLK_BND           0x00010000
+# define TS_CTL_TSCLK_ETE           0x00020000
+# define TS_CTL_TSCLK_PTP           0x00030000
+
+# define TS_CTL_TSMSTRENA           0x00008000
+# define TS_CTL_TSEVNTENA           0x00004000
+# define TS_CTL_TSIPV4ENA           0x00002000
+# define TS_CTL_TSIPV6ENA           0x00001000
+
+# define TS_CTL_TSIPENA             0x00000800
+# define TS_CTL_TSVER2ENA           0x00000400
+# define TS_CTL_TSCTRLSSR           0x00000200
+# define TS_CTL_TSENALL             0x00000100
+
+# define TS_CTL_TSADDREG            0x00000020
+# define TS_CTL_TSTRIG              0x00000010
+
+# define TS_CTL_TSUPDT              0x00000008
+# define TS_CTL_TSINIT              0x00000004
+# define TS_CTL_TSCFUPDT            0x00000002
+# define TS_CTL_TSENA               0x00000001
+
+
+# define TS_SUB_SEC_INCR_REG        0x0704
+# define TS_HIGH_REG                0x0708
+# define TS_LOW_REG                 0x070c
+# define TS_HI_UPDT_REG             0x0710
+# define TS_LO_UPDT_REG             0x0714
+# define TS_APPEND_REG              0x0718
+# define TS_TARG_TIME_HIGH_REG      0x071c
+# define TS_TARG_TIME_LOW_REG       0x0720
+# define TS_HIGHER_WD_REG           0x0724
+# define TS_STATUS_REG              0x072c
+
+/*     0x0730-0x07fc reserved */
+
+# define MAC_ADR16_HIGH_REG        0x0800
+# define MAC_ADR16_LOW_REG         0x0804
+/* additional pairs of registers for MAC addresses 17-31 */
+
+# define MAC_ADR_MAX             32
+
+
+# define  QFEC_INTRP_SETUP               (INTRP_EN_REG_AIE    \
+					| INTRP_EN_REG_FBE \
+					| INTRP_EN_REG_RWE \
+					| INTRP_EN_REG_RSE \
+					| INTRP_EN_REG_RUE \
+					| INTRP_EN_REG_UNE \
+					| INTRP_EN_REG_OVE \
+					| INTRP_EN_REG_TJE \
+					| INTRP_EN_REG_TSE \
+					| INTRP_EN_REG_NIE \
+					| INTRP_EN_REG_RIE \
+					| INTRP_EN_REG_TIE)
+
+/*
+ * ASIC Ethernet clock register definitions:
+ *     address offsets and some register definitions
+ */
+
+# define EMAC_CLK_REG_BASE           0x94020000
+
+/*
+ * PHY clock PLL register locations
+ */
+# define ETH_MD_REG                  0x02A4
+# define ETH_NS_REG                  0x02A8
+
+/* definitions of NS_REG control bits
+ */
+# define ETH_NS_SRC_SEL              0x0007
+
+# define ETH_NS_PRE_DIV_MSK          0x0018
+# define ETH_NS_PRE_DIV(x)           (ETH_NS_PRE_DIV_MSK & (x << 3))
+
+# define ETH_NS_MCNTR_MODE_MSK       0x0060
+# define ETH_NS_MCNTR_MODE_BYPASS    0x0000
+# define ETH_NS_MCNTR_MODE_SWALLOW   0x0020
+# define ETH_NS_MCNTR_MODE_DUAL      0x0040
+# define ETH_NS_MCNTR_MODE_SINGLE    0x0060
+
+# define ETH_NS_MCNTR_RST            0x0080
+# define ETH_NS_MCNTR_EN             0x0100
+
+# define EMAC_PTP_NS_CLK_EN          0x0200
+# define EMAC_PTP_NS_CLK_INV         0x0400
+# define EMAC_PTP_NS_ROOT_EN         0x0800
+
+/* clock sources
+ */
+# define CLK_SRC_TCXO                0x0
+# define CLK_SRC_PLL_GLOBAL          0x1
+# define CLK_SRC_PLL_ARM             0x2
+# define CLK_SRC_PLL_QDSP6           0x3
+# define CLK_SRC_PLL_EMAC            0x4
+# define CLK_SRC_EXT_CLK2            0x5
+# define CLK_SRC_EXT_CLK1            0x6
+# define CLK_SRC_CORE_TEST           0x7
+
+# define ETH_MD_M(x)                 (x << 16)
+# define ETH_MD_2D_N(x)              ((~(x) & 0xffff))
+# define ETH_NS_NM(x)                ((~(x) << 16) & 0xffff0000)
+
+/*
+ * PHY interface clock divider
+ */
+# define ETH_X_EN_NS_REG             0x02AC
+
+# define ETH_RX_CLK_FB_INV           0x80
+# define ETH_RX_CLK_FB_EN            0x40
+# define ETH_TX_CLK_FB_INV           0x20
+# define ETH_TX_CLK_FB_EN            0x10
+# define ETH_RX_CLK_INV              0x08
+# define ETH_RX_CLK_EN               0x04
+# define ETH_TX_CLK_INV              0x02
+# define ETH_TX_CLK_EN               0x01
+
+# define ETH_X_EN_NS_DEFAULT \
+	(ETH_RX_CLK_FB_EN | ETH_TX_CLK_FB_EN | ETH_RX_CLK_EN | ETH_TX_CLK_EN)
+
+# define EMAC_PTP_MD_REG             0x02B0
+
+/* PTP clock divider
+ */
+# define EMAC_PTP_NS_REG             0x02B4
+
+/*
+ * clock interface pin controls
+ */
+# define EMAC_NS_REG                 0x02B8
+
+# define EMAC_RX_180_CLK_INV         0x2000
+# define EMAC_RX_180_CLK_EN          0x1000
+# define EMAC_RX_180_CLK_EN_INV      (EMAC_RX_180_CLK_INV | EMAC_RX_180_CLK_EN)
+
+# define EMAC_TX_180_CLK_INV         0x0800
+# define EMAC_TX_180_CLK_EN          0x0400
+# define EMAC_TX_180_CLK_EN_INV      (EMAC_TX_180_CLK_INV | EMAC_TX_180_CLK_EN)
+
+# define EMAC_REVMII_RX_CLK_INV      0x0200
+# define EMAC_REVMII_RX_CLK_EN       0x0100
+
+# define EMAC_RX_CLK_INV             0x0080
+# define EMAC_RX_CLK_EN              0x0040
+
+# define EMAC_REVMII_TX_CLK_INV      0x0020
+# define EMAC_REVMII_TX_CLK_EN       0x0010
+
+# define EMAC_TX_CLK_INV             0x0008
+# define EMAC_TX_CLK_EN              0x0004
+
+# define EMAC_RX_R_CLK_EN            0x0002
+# define EMAC_TX_R_CLK_EN            0x0001
+
+# define EMAC_NS_DEFAULT \
+	(EMAC_RX_180_CLK_EN_INV | EMAC_TX_180_CLK_EN_INV \
+	| EMAC_REVMII_RX_CLK_EN | EMAC_REVMII_TX_CLK_EN \
+	| EMAC_RX_CLK_EN | EMAC_TX_CLK_EN \
+	| EMAC_RX_R_CLK_EN | EMAC_TX_R_CLK_EN)
+
+/*
+ *
+ */
+# define EMAC_TX_FS_REG              0x02BC
+# define EMAC_RX_FS_REG              0x02C0
+
+/*
+ * Ethernet controller PHY interface select
+ */
+# define EMAC_PHY_INTF_SEL_REG       0x18030
+
+# define EMAC_PHY_INTF_SEL_MII       0x0
+# define EMAC_PHY_INTF_SEL_RGMII     0x1
+# define EMAC_PHY_INTF_SEL_REVMII    0x7
+# define EMAC_PHY_INTF_SEL_MASK      0x7
+
+/*
+ * MDIO addresses
+ */
+# define EMAC_PHY_ADDR_REG           0x18034
+# define EMAC_REVMII_PHY_ADDR_REG    0x18038
+
+/*
+ * clock routing
+ */
+# define EMAC_CLKMUX_SEL_REG         0x1803c
+
+# define EMAC_CLKMUX_SEL_0           0x1
+# define EMAC_CLKMUX_SEL_1           0x2
+
+
+#endif
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 5f53fbb..1e1617e 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -223,6 +223,20 @@
 #define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
 #define SMC_IRQ_FLAGS		(-1)	/* from resource */
 
+#elif defined(CONFIG_ARCH_MSM)
+
+#define SMC_CAN_USE_8BIT	0
+#define SMC_CAN_USE_16BIT	1
+#define SMC_CAN_USE_32BIT	0
+#define SMC_NOWAIT		1
+
+#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_outw(v, a, r)	writew(v, (a) + (r))
+#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS		IRQF_TRIGGER_HIGH
+
 #elif defined(CONFIG_MN10300)
 
 /*
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c6d47d1..1064aa0 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -42,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -872,7 +873,7 @@
 			    (!pdata->using_extphy)) {
 				/* Restore original GPIO configuration */
 				pdata->gpio_setting = pdata->gpio_orig_setting;
-				smsc911x_reg_write(pdata, GPIO_CFG,
+				smsc911x_reg_write(pdata, SMSC_GPIO_CFG,
 					pdata->gpio_setting);
 			}
 		} else {
@@ -880,7 +881,7 @@
 			/* Check global setting that LED1
 			 * usage is 10/100 indicator */
 			pdata->gpio_setting = smsc911x_reg_read(pdata,
-				GPIO_CFG);
+				SMSC_GPIO_CFG);
 			if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
 			    (!pdata->using_extphy)) {
 				/* Force 10/100 LED off, after saving
@@ -891,7 +892,7 @@
 				pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_
 							| GPIO_CFG_GPIODIR0_
 							| GPIO_CFG_GPIOD0_);
-				smsc911x_reg_write(pdata, GPIO_CFG,
+				smsc911x_reg_write(pdata, SMSC_GPIO_CFG,
 					pdata->gpio_setting);
 			}
 		}
@@ -1314,7 +1315,7 @@
 		SMSC_WARN(pdata, ifup,
 			  "Timed out waiting for EEPROM busy bit to clear");
 
-	smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
+	smsc911x_reg_write(pdata, SMSC_GPIO_CFG, 0x70070000);
 
 	/* The soft reset above cleared the device's MAC address,
 	 * restore it from local copy (set in probe) */
@@ -1758,9 +1759,9 @@
 
 static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata)
 {
-	unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG);
+	unsigned int temp = smsc911x_reg_read(pdata, SMSC_GPIO_CFG);
 	temp &= ~GPIO_CFG_EEPR_EN_;
-	smsc911x_reg_write(pdata, GPIO_CFG, temp);
+	smsc911x_reg_write(pdata, SMSC_GPIO_CFG, temp);
 	msleep(1);
 }
 
@@ -2055,6 +2056,10 @@
 
 	SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
+	if (pdata->config.has_reset_gpio) {
+		gpio_set_value_cansleep(pdata->config.reset_gpio, 0);
+		gpio_free(pdata->config.reset_gpio);
+	}
 	phy_disconnect(pdata->phy_dev);
 	pdata->phy_dev = NULL;
 	mdiobus_unregister(pdata->mii_bus);
@@ -2185,7 +2190,7 @@
 	smsc911x_reg_write(pdata, INT_EN, 0);
 	smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
 
-	retval = request_irq(dev->irq, smsc911x_irqhandler,
+	retval = request_any_context_irq(dev->irq, smsc911x_irqhandler,
 			     irq_flags | IRQF_SHARED, dev->name, dev);
 	if (retval) {
 		SMSC_WARN(pdata, probe,
@@ -2277,6 +2282,10 @@
 		PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
 		PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
 
+	/* Drive the GPIO Ethernet_Reset Line low to Suspend */
+	if (pdata->config.has_reset_gpio)
+		gpio_set_value_cansleep(pdata->config.reset_gpio, 0);
+
 	return 0;
 }
 
@@ -2286,6 +2295,9 @@
 	struct smsc911x_data *pdata = netdev_priv(ndev);
 	unsigned int to = 100;
 
+	if (pdata->config.has_reset_gpio)
+		gpio_set_value_cansleep(pdata->config.reset_gpio, 1);
+
 	/* Note 3.11 from the datasheet:
 	 * 	"When the LAN9220 is in a power saving state, a write of any
 	 * 	 data to the BYTE_TEST register will wake-up the device."
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 8d67aac..19711d2 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -236,7 +236,7 @@
 #define PMT_CTRL_PME_EN_		0x00000002
 #define PMT_CTRL_READY_			0x00000001
 
-#define GPIO_CFG			0x88
+#define SMSC_GPIO_CFG			0x88
 #define GPIO_CFG_LED3_EN_		0x40000000
 #define GPIO_CFG_LED2_EN_		0x20000000
 #define GPIO_CFG_LED1_EN_		0x10000000
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index f1d88c5..50dacf9 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -273,6 +273,26 @@
 	help
 	  Enables Power/Reset/Carddetect function abstraction
 
+config LIBRA_SDIOIF
+	tristate "Qualcomm libra wlan SDIO driver"
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	depends on MMC_MSM
+	---help---
+	  A driver for Qualcomm WLAN SDIO Libra chipset.
+
+config WCNSS_WLAN
+	tristate "Qualcomm WCNSS WLAN driver"
+	depends on ARCH_MSM8960
+	select WIRELESS_EXT
+	select WEXT_PRIV
+	select WEXT_CORE
+	select WEXT_SPY
+	---help---
+	  A driver for Qualcomm WCNSS WLAN feature
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 8ceae0a..f725adf 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -61,3 +61,6 @@
 
 obj-$(CONFIG_BCM4329)	+= bcm4329/
 obj-$(CONFIG_BCMDHD)	+= bcmdhd/
+
+obj-$(CONFIG_LIBRA_SDIOIF)	+= libra/
+obj-$(CONFIG_WCNSS_WLAN)	+= wcnss/
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1be7c8b..82fb6ce 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -506,6 +506,17 @@
 	ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
 			 AR_WA_ASPM_TIMER_BASED_DISABLE);
 
+	/*
+	 * Read back AR_WA into a permanent copy and set bits 14 and 17.
+	 * We need to do this to avoid RMW of this register. We cannot
+	 * read the reg when chip is asleep.
+	 */
+	ah->WARegVal = REG_READ(ah, AR_WA);
+	ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+			 AR_WA_ASPM_TIMER_BASED_DISABLE);
+
+	ath9k_hw_read_revisions(ah);
+
 	if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
 		ath_err(common, "Couldn't reset chip\n");
 		return -EIO;
diff --git a/drivers/net/wireless/bcm4329/bcmspibrcm.c b/drivers/net/wireless/bcm4329/bcmspibrcm.c
new file mode 100644
index 0000000..0f131a4
--- /dev/null
+++ b/drivers/net/wireless/bcm4329/bcmspibrcm.c
@@ -0,0 +1,1726 @@
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 2010, Broadcom Corporation
+ * All Rights Reserved.
+ * 
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * $Id: bcmspibrcm.c,v 1.11.2.10.2.9.6.11 2009/05/21 13:21:57 Exp $
+ */
+
+#define HSMODE
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h>
+#include <spid.h>
+
+#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h>	/* ioctl/iovars */
+#include <sdio.h>
+
+#include <pcicfg.h>
+
+
+#include <bcmspibrcm.h>
+#include <bcmspi.h>
+
+#define F0_RESPONSE_DELAY	16
+#define F1_RESPONSE_DELAY	16
+#define F2_RESPONSE_DELAY	F0_RESPONSE_DELAY
+
+#define CMDLEN		4
+
+#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE)
+
+/* Globals */
+uint sd_msglevel = 0;
+
+uint sd_hiok = FALSE;		/* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use SD4 mode by default */
+uint sd_f2_blocksize = 64;		/* Default blocksize */
+
+
+uint sd_divisor = 2;
+uint sd_power = 1;		/* Default to SD Slot powered ON */
+uint sd_clock = 1;		/* Default to SD Clock turned ON */
+uint sd_crc = 0;		/* Default to SPI CRC Check turned OFF */
+
+uint8	spi_outbuf[SPI_MAX_PKT_LEN];
+uint8	spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits
+ * assuming we will not exceed F0 response delay > 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN	128
+uint8	spi_outbuf2[BUF2_PKT_LEN];
+uint8	spi_inbuf2[BUF2_PKT_LEN];
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+                           uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+                              int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+                               uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+                          uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+                                 uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ *  Public entry points & externs
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* Spi device default is 16bit mode, change to 4 when device is changed to 32bit
+	 * mode
+	 */
+	sd->wordlen = 2;
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (bcmspi_driver_init(sd) != SUCCESS) {
+		sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_query_device(sdioh_info_t *sd)
+{
+	/* Return a BRCM ID appropriate to the dongle class */
+	return (sd->num_funcs > 1) ? BCM4329_D11NDUAL_ID : BCM4318_D11G_ID;
+}
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+	return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+	sd->chip = chip;
+	sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+	uint8 reg = 0;
+	int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		reg |= DWORD_PKT_LEN_EN;
+		sd->dwordmode = TRUE;
+		sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+	} else {
+		reg &= ~DWORD_PKT_LEN_EN;
+		sd->dwordmode = FALSE;
+		sd->client_block_size[SPI_FUNC_2] = 2048;
+	}
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+	     SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_SPIERRSTATS,
+	IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL, 	0,	IOVT_UINT32,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t)	},
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed",	IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+	{"spi_respdelay",	IOV_RESP_DELAY_ALL,	0,	IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+               void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+/*
+	sdioh_regs_t *regs;
+*/
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
+	ASSERT(set || (arg && len));
+	ASSERT(!set || (!params && !plen));
+
+	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+		bcmerror = BCME_UNSUPPORTED;
+		goto exit;
+	}
+
+	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+		goto exit;
+
+	/* Set up params so get and set can share the convenience variables */
+	if (params == NULL) {
+		params = arg;
+		plen = len;
+	}
+
+	if (vi->type == IOVT_VOID)
+		val_size = 0;
+	else if (vi->type == IOVT_BUFFER)
+		val_size = len;
+	else
+		val_size = sizeof(int);
+
+	if (plen >= (int)sizeof(int_val))
+		bcopy(params, &int_val, sizeof(int_val));
+
+	bool_val = (int_val != 0) ? TRUE : FALSE;
+
+	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	switch (actionid) {
+	case IOV_GVAL(IOV_MSGLEVEL):
+		int_val = (int32)sd_msglevel;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_MSGLEVEL):
+		sd_msglevel = int_val;
+		break;
+
+	case IOV_GVAL(IOV_BLOCKSIZE):
+		if ((uint32)int_val > si->num_funcs) {
+			bcmerror = BCME_BADARG;
+			break;
+		}
+		int_val = (int32)si->client_block_size[int_val];
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_DMA):
+		int_val = (int32)si->sd_use_dma;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DMA):
+		si->sd_use_dma = (bool)int_val;
+		break;
+
+	case IOV_GVAL(IOV_USEINTS):
+		int_val = (int32)si->use_client_ints;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_USEINTS):
+		break;
+
+	case IOV_GVAL(IOV_DIVISOR):
+		int_val = (uint32)sd_divisor;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_DIVISOR):
+		sd_divisor = int_val;
+		if (!spi_start_clock(si, (uint16)sd_divisor)) {
+			sd_err(("%s: set clock failed\n", __FUNCTION__));
+			bcmerror = BCME_ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_POWER):
+		int_val = (uint32)sd_power;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_POWER):
+		sd_power = int_val;
+		break;
+
+	case IOV_GVAL(IOV_CLOCK):
+		int_val = (uint32)sd_clock;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_CLOCK):
+		sd_clock = int_val;
+		break;
+
+	case IOV_GVAL(IOV_SDMODE):
+		int_val = (uint32)sd_sdmode;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_SDMODE):
+		sd_sdmode = int_val;
+		break;
+
+	case IOV_GVAL(IOV_HISPEED):
+		int_val = (uint32)sd_hiok;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_HISPEED):
+		sd_hiok = int_val;
+
+		if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+			sd_err(("%s: Failed changing highspeed mode to %d.\n",
+			        __FUNCTION__, sd_hiok));
+			bcmerror = BCME_ERROR;
+			return ERROR;
+		}
+		break;
+
+	case IOV_GVAL(IOV_NUMINTS):
+		int_val = (int32)si->intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_GVAL(IOV_NUMLOCALINTS):
+		int_val = (int32)si->local_intrcount;
+		bcopy(&int_val, arg, val_size);
+		break;
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+
+	case IOV_GVAL(IOV_SPIERRSTATS):
+	{
+		bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_SVAL(IOV_SPIERRSTATS):
+	{
+		bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+		break;
+	}
+
+	case IOV_GVAL(IOV_RESP_DELAY_ALL):
+		int_val = (int32)si->resp_delay_all;
+		bcopy(&int_val, arg, val_size);
+		break;
+
+	case IOV_SVAL(IOV_RESP_DELAY_ALL):
+		si->resp_delay_all = (bool)int_val;
+		int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+		if (si->resp_delay_all)
+			int_val |= RESP_DELAY_ALL;
+		else {
+			if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+			     F1_RESPONSE_DELAY) != SUCCESS) {
+				sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+				bcmerror = BCME_SDIO_ERROR;
+				break;
+			}
+		}
+
+		if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+		     != SUCCESS) {
+			sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+
+	if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+		uint8 dummy_data;
+		status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+		if (status) {
+			sd_err(("sdioh_cfg_read() failed.\n"));
+			return status;
+		}
+	}
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 cis_byte;
+	uint16 *cis = (uint16 *)cisd;
+	uint bar0 = SI_ENUM_BASE;
+	int status;
+	uint8 data;
+
+	sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+	spi_lock(sd);
+
+	/* Set sb window address to 0x18000000 */
+	data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+	status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+	if (status == SUCCESS) {
+		data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+	if (status == SUCCESS) {
+		data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+		status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+	} else {
+		sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+		spi_unlock(sd);
+		return (BCME_ERROR);
+	}
+
+	offset =  CC_OTP; /* OTP offset in chipcommon. */
+	for (count = 0; count < length/2; count++) {
+		if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			spi_unlock(sd);
+			return (BCME_ERROR);
+		}
+
+		*cis = (uint16)cis_byte;
+		cis++;
+		offset += 2;
+	}
+
+	spi_unlock(sd);
+
+	return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int status;
+	uint32 cmd_arg;
+	uint32 dstatus;
+	uint32 data = (uint32)(*byte);
+
+	spi_lock(sd);
+
+	cmd_arg = 0;
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, rw, func,
+	         regaddr, data));
+
+	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma,
+	                              cmd_arg, &data, 1)) != SUCCESS) {
+		spi_unlock(sd);
+		return status;
+	}
+
+	if (rw == SDIOH_READ)
+		*byte = (uint8)data;
+
+	bcmspi_cmd_getdstatus(sd, &dstatus);
+	if (dstatus)
+		sd_trace(("dstatus =0x%x\n", dstatus));
+
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+                   uint32 *word, uint nbytes)
+{
+	int status;
+
+	spi_lock(sd);
+
+	if (rw == SDIOH_READ)
+		status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+	else
+		status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+	spi_unlock(sd);
+	return (status == SUCCESS ?  SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+	int len;
+	int buflen = (int)buflen_u;
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+	spi_lock(sd);
+
+	ASSERT(reg_width == 4);
+	ASSERT(buflen_u < (1 << 30));
+	ASSERT(sd->client_block_size[func]);
+
+	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+	         __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+	         buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+	/* Break buffer down into blocksize chunks. */
+	while (buflen > 0) {
+		len = MIN(sd->client_block_size[func], buflen);
+		if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+			sd_err(("%s: bcmspi_card_buf %s failed\n",
+				__FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+			spi_unlock(sd);
+			return SDIOH_API_RC_FAIL;
+		}
+		buffer += len;
+		buflen -= len;
+		if (!fifo)
+			addr += len;
+	}
+	spi_unlock(sd);
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows write to gspi bus when another rd/wr function is deep down the call stack.
+ * Its main aim is to have simpler spi writes rather than recursive writes.
+ * e.g. When there is a need to program response delay on the fly after detecting the SPI-func
+ * this call will allow to program the response delay.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+	uint32 cmd_arg;
+	uint32 datalen = 1;
+	uint32 hostlen;
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf2 = bcmswap32(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint16 *)spi_outbuf2 = bcmswap16(cmd_arg & 0xffff);
+		*(uint16 *)&spi_outbuf2[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16);
+		if (datalen & 0x1)
+			datalen++;
+	} else {
+		sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer  */
+	if (datalen != 0) {
+			if (sd->wordlen == 4) { /* 32bit spid */
+				*(uint32 *)&spi_outbuf2[CMDLEN] = bcmswap32(byte);
+			} else if (sd->wordlen == 2) { /* 16bit spid */
+				*(uint16 *)&spi_outbuf2[CMDLEN] = bcmswap16(byte & 0xffff);
+				*(uint16 *)&spi_outbuf2[CMDLEN + 2] =
+					bcmswap16((byte & 0xffff0000) >> 16);
+			}
+	}
+
+	/* +4 for cmd, +4 for dstatus */
+	hostlen = datalen + 8;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16));
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+	if (sd->resp_delay_all == FALSE)
+		return (BCME_OK);
+
+	if (sd->prev_fun == func)
+		return (BCME_OK);
+
+	if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+		return (BCME_OK);
+
+	bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+	/* Remember function for which to avoid reprogramming resp-delay in next iteration */
+	sd->prev_fun = func;
+
+	return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN	0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros. It's a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is
+ * synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+	uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
+	 * according to the wordlen mode(16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying command in the spi-outbuffer */
+	*(uint32 *)spi_outbuf2 = cmd_arg;
+
+	/* for Write, put the data into the output buffer  */
+	*(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+	/* +4 for cmd, +4 for dstatus */
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) |
+		                   (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16));
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+		        __FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+	uint32 dstatus = sd->card_dstatus;
+	struct spierrstats_t *spierrstats = &sd->spierrstats;
+	int err = SUCCESS;
+
+	sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+	/* Store dstatus of last few gSPI transactions */
+	spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+	spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+	dstatus_count++;
+
+	if (sd->card_init_done == FALSE)
+		return err;
+
+	if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+		spierrstats->dna++;
+		sd_trace(("Read data not available on F1 addr = 0x%x\n",
+		        GFIELD(cmd_arg, SPI_REG_ADDR)));
+		/* Clear dna bit */
+		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+	}
+
+	if (dstatus & STATUS_UNDERFLOW) {
+		spierrstats->rdunderflow++;
+		sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+	}
+
+	if (dstatus & STATUS_OVERFLOW) {
+		spierrstats->wroverflow++;
+		sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+		if ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 0)) {
+			bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+			bcmspi_resync_f1(sd);
+			sd_err(("Recovering from F1 FIFO overflow.\n"));
+		} else {
+			err = ERROR_OF;
+		}
+	}
+
+	if (dstatus & STATUS_F2_INTR) {
+		spierrstats->f2interrupt++;
+		sd_trace(("Interrupt from F2.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_F3_INTR) {
+		spierrstats->f3interrupt++;
+		sd_err(("Interrupt from F3.  SW should clear corresponding IntStatus bits\n"));
+	}
+
+	if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+		spierrstats->hostcmddataerr++;
+		sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+	}
+
+	if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+		spierrstats->f2pktavailable++;
+		sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+		sd_trace(("Packet length = %d\n", sd->dwordmode ?
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+		         ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+	}
+
+	if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+		spierrstats->f3pktavailable++;
+		sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+		sd_err(("Packet length = %d\n",
+		        (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+	}
+
+	return err;
+}
+
/* Abort an in-progress transfer on function 'func'.
 * The gSPI back-end has no abort mechanism, so this is a no-op that
 * always reports success (0).
 */
extern int
sdioh_abort(sdioh_info_t *sd, uint func)
{
	return 0;
}
+
/* Start the SDIO host controller.  No-op for the gSPI back-end; the
 * 'stage' argument is ignored.  Always returns SUCCESS.
 */
int
sdioh_start(sdioh_info_t *sd, int stage)
{
	return SUCCESS;
}
+
/* Stop the SDIO host controller.  No-op for the gSPI back-end.
 * Always returns SUCCESS.
 */
int
sdioh_stop(sdioh_info_t *sd)
{
	return SUCCESS;
}
+
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+	/* Default power on mode */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+	uint32 regdata[2];
+	int status;
+
+	/* Find F1/F2/F3 max packet size */
+	if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+	                                 8, regdata)) != SUCCESS) {
+		return status;
+	}
+
+	sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+	        regdata[0], regdata[1]));
+
+	sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+	ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+	sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+	ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+	sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+	ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+	return 0;
+}
+
/* Bring the attached gSPI card to an operational state: start the SPI
 * clock, adapt word-width/response-delay settings with the device,
 * sanity-check register access, cache per-function block sizes and
 * enable interrupt-with-status reporting.  Returns SUCCESS or ERROR.
 */
static int
bcmspi_client_init(sdioh_info_t *sd)
{
	uint32	status_en_reg = 0;
	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));

#ifdef HSMODE
	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
		sd_err(("spi_start_clock failed\n"));
		return ERROR;
	}
#else
	/* Start at ~400KHz clock rate for initialization */
	if (!spi_start_clock(sd, 128)) {
		sd_err(("spi_start_clock failed\n"));
		return ERROR;
	}
#endif /* HSMODE */

	/* Negotiate clock-phase / word-length with the device. */
	if (!bcmspi_host_device_init_adapt(sd)) {
		sd_err(("bcmspi_host_device_init_adapt failed\n"));
		return ERROR;
	}

	/* Verify the RO and RW test registers respond correctly. */
	if (!bcmspi_test_card(sd)) {
		sd_err(("bcmspi_test_card failed\n"));
		return ERROR;
	}

	sd->num_funcs = SPI_MAX_IOFUNCS;

	/* NOTE(review): return value of get_client_blocksize() is ignored. */
	get_client_blocksize(sd);

	/* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
	bcmspi_resync_f1(sd);

	sd->dwordmode = FALSE;

	bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);

	sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
	status_en_reg |= INTR_WITH_STATUS;


	if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
	    status_en_reg & 0xff) != SUCCESS) {
		sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__));
		return ERROR;
	}


#ifndef HSMODE
	/* After configuring for High-Speed mode, set the desired clock rate. */
	if (!spi_start_clock(sd, 4)) {
		sd_err(("spi_start_clock failed\n"));
		return ERROR;
	}
#endif /* HSMODE */

	/* From here on, bcmspi_update_stats() tracks dstatus errors. */
	sd->card_init_done = TRUE;


	return SUCCESS;
}
+
/* Enable or disable the device's High-Speed mode bit in SPID_CONFIG
 * and mirror the setting into the host controller.
 *
 * NOTE(review): the return value is inconsistent — early exits return
 * 'status' (the regread/regwrite result), while the successful
 * fall-through path returns TRUE.  Callers should treat it as
 * "non-error"; confirm before relying on a specific value.
 */
static int
bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
{
	uint32 regdata;
	int status;

	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
	                                 4, &regdata)) != SUCCESS)
		return status;

	sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));


	if (hsmode == TRUE) {
		sd_trace(("Attempting to enable High-Speed mode.\n"));

		if (regdata & HIGH_SPEED_MODE) {
			sd_trace(("Device is already in High-Speed mode.\n"));
			return status;
		} else {
			regdata |= HIGH_SPEED_MODE;
			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
			                                  4, regdata)) != SUCCESS) {
				return status;
			}
		}
	} else {
		sd_trace(("Attempting to disable High-Speed mode.\n"));

		if (regdata & HIGH_SPEED_MODE) {
			regdata &= ~HIGH_SPEED_MODE;
			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
			                                  4, regdata)) != SUCCESS)
				return status;
		}
		 else {
			sd_trace(("Device is already in Low-Speed mode.\n"));
			return status;
		}
	}

	/* Only reached when the device register was actually changed. */
	spi_controller_highspeed_mode(sd, hsmode);

	return TRUE;
}
+
/* Probe which word-length framing the device is currently using by
 * reading the fixed test register first as 16-bit and then as 32-bit.
 * A recognized signature byte (0xad/0x5a/0x5b/0x5d) means the framing
 * matched.
 *
 * NOTE: this macro contains a bare 'break' and therefore MUST be
 * expanded inside a loop (see bcmspi_host_device_init_adapt()); it
 * also requires 'status' and 'regdata' variables at the expansion
 * site.  It is deliberately NOT wrapped in do { } while (0) so the
 * 'break' escapes the caller's loop.
 */
#define bcmspi_find_curr_mode(sd) { \
	sd->wordlen = 2; \
	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
	regdata &= 0xff; \
	if ((regdata == 0xad) || (regdata == 0x5b) || \
	    (regdata == 0x5d) || (regdata == 0x5a)) \
		break; \
	sd->wordlen = 4; \
	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
	regdata &= 0xff; \
	if ((regdata == 0xad) || (regdata == 0x5b) || \
	    (regdata == 0x5d) || (regdata == 0x5a)) \
		break; \
	sd_trace(("Silicon testability issue: regdata = 0x%x." \
	          " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata));	\
	OSL_DELAY(100000); \
}
+
/* Maximum probe iterations before declaring the device absent. */
#define INIT_ADAPT_LOOP		100

/* Adapt clock-phase-speed-bitwidth between host and device */
static bool
bcmspi_host_device_init_adapt(sdioh_info_t *sd)
{
	uint32 wrregdata, regdata = 0;
	int status;
	int i;

	/* Due to a silicon testability issue, the first command from the Host
	 * to the device will get corrupted (first bit will be lost). So the
	 * Host should poll the device with a safe read request. ie: The Host
	 * should try to read F0 addr 0x14 using the Fixed address mode
	 * (This will prevent a unintended write command to be detected by device)
	 */
	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
		/* If device was not power-cycled it will stay in 32bit mode with
		 * response-delay-all bit set.  Alternate the iteration so that
		 * read either with or without response-delay for F0 to succeed.
		 */
		/* Each bcmspi_find_curr_mode() expansion contains a 'break'
		 * that exits this loop as soon as a framing mode matches.
		 */
		bcmspi_find_curr_mode(sd);
		sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;

		bcmspi_find_curr_mode(sd);
		sd->dwordmode = TRUE;

		bcmspi_find_curr_mode(sd);
		sd->dwordmode = FALSE;
	}

	/* Bail out, device not detected */
	if (i == INIT_ADAPT_LOOP)
		return FALSE;

	/* Softreset the spid logic */
	if ((sd->dwordmode) || (sd->wordlen == 4)) {
		bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
		sd_trace(("reset reg read = 0x%x\n", regdata));
		sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
		       sd->wordlen, sd->resp_delay_all));
		/* Restore default state after softreset */
		sd->wordlen = 2;
		sd->dwordmode = FALSE;
	}

	if (sd->wordlen == 4) {
		/* Device was already in 32-bit mode; just verify the RO
		 * signature instead of re-programming SPID_CONFIG.
		 */
		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
		     SUCCESS)
				return FALSE;
		if (regdata == TEST_RO_DATA_32BIT_LE) {
			sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
			          regdata));
			sd_trace(("Spid power was left on.\n"));
		} else {
			sd_err(("Spid power was left on but signature read failed."
			        " Value read = 0x%x\n", regdata));
			return FALSE;
		}
	} else {
		sd->wordlen = 2;

#define CTRL_REG_DEFAULT	0x00010430 /* according to the host m/c */

		wrregdata = (CTRL_REG_DEFAULT);
		sd->resp_delay_all = TRUE;
		if (sd->resp_delay_all == TRUE) {
			/* Enable response delay for all */
			wrregdata |= (RESP_DELAY_ALL << 16);
			/* Program response delay value */
			wrregdata &= 0xffff00ff;
			wrregdata |= (F1_RESPONSE_DELAY << 8);
			sd->prev_fun = SPI_FUNC_1;
			bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
		}

		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
			return FALSE;
		sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));

#ifndef HSMODE
		wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
		wrregdata &= ~HIGH_SPEED_MODE;
		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
#endif /* HSMODE */

		/* Retry reads if the signature came back shifted by one bit
		 * (another symptom of the first-command corruption above).
		 */
		for (i = 0; i < INIT_ADAPT_LOOP; i++) {
			if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
				sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
				if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
				     &regdata)) != SUCCESS)
					return FALSE;
			}
			OSL_DELAY(1000);
		}


		/* Change to host controller intr-polarity of active-low */
		wrregdata &= ~INTR_POLARITY;
		sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
		        wrregdata));
		/* Change to 32bit mode */
		wrregdata |= WORD_LENGTH_32;
		bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);

		/* Change command/data packaging in 32bit LE mode */
		sd->wordlen = 4;

		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
			return FALSE;

		if (regdata == TEST_RO_DATA_32BIT_LE) {
			sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
			sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n"));
		} else {
			sd_err(("Stale spid reg values read as it was kept powered. Value read ="
			  "0x%x\n", regdata));
			return FALSE;
		}
	}


	return TRUE;
}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+		return FALSE;
+
+	if (regdata == (TEST_RO_DATA_32BIT_LE))
+		sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+	else {
+		sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+		return FALSE;
+	}
+
+
+#define RW_PATTERN1	0xA0A1A2A3
+#define RW_PATTERN2	0x4B5B6B7B
+
+	regdata = RW_PATTERN1;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN1) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN1, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	regdata = RW_PATTERN2;
+	if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+		return FALSE;
+	regdata = 0;
+	if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+		return FALSE;
+	if (regdata != RW_PATTERN2) {
+		sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+			RW_PATTERN2, regdata));
+		return FALSE;
+	} else
+		sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+	return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((bcmspi_host_init(sd)) != SUCCESS) {
+		return ERROR;
+	}
+
+	if (bcmspi_client_init(sd) != SUCCESS) {
+		return ERROR;
+	}
+
+	return SUCCESS;
+}
+
/* Read device reg */
/* Incremental-address register read of 'regsize' bytes from 'regaddr'
 * on function 'func' into *data.  Returns SUCCESS or the command
 * status from bcmspi_cmd_issue().
 */
static int
bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
{
	int status;
	uint32 cmd_arg, dstatus;

	ASSERT(regsize);

	if (func == 2)
		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));

	cmd_arg = 0;
	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
	/* A full F2-block read is encoded as length 0 in the SPI_LEN field. */
	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);

	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func,
	         regaddr, *data));

	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize))
	    != SUCCESS)
		return status;

	bcmspi_cmd_getdstatus(sd, &dstatus);
	if (dstatus)
		sd_trace(("dstatus =0x%x\n", dstatus));

	return SUCCESS;
}
+
/* Fixed-address register read: like bcmspi_card_regread() but with the
 * SPI_ACCESS field cleared so the device does not auto-increment the
 * address.  Used during mode probing where a stray write must be
 * avoided.  Returns SUCCESS or the command status.
 */
static int
bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
{

	int status;
	uint32 cmd_arg;
	uint32 dstatus;

	ASSERT(regsize);

	if (func == 2)
		sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));

	cmd_arg = 0;
	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);	/* Fixed access */
	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);

	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));

	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize))
	    != SUCCESS)
		return status;

	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func,
	         regaddr, *data));

	bcmspi_cmd_getdstatus(sd, &dstatus);
	sd_trace(("dstatus =0x%x\n", dstatus));
	return SUCCESS;
}
+
/* write a device register */
/* Incremental-address register write of 'regsize' bytes of 'data' to
 * 'regaddr' on function 'func'.  Returns SUCCESS or the command status
 * from bcmspi_cmd_issue().
 */
static int
bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
{
	int status;
	uint32 cmd_arg, dstatus;

	ASSERT(regsize);

	cmd_arg = 0;

	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
	/* A full F2-block write is encoded as length 0 in the SPI_LEN field. */
	cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);

	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
	sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 1, func,
	         regaddr, data));


	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize))
	    != SUCCESS)
		return status;

	bcmspi_cmd_getdstatus(sd, &dstatus);
	if (dstatus)
		sd_trace(("dstatus =0x%x\n", dstatus));

	return SUCCESS;
}
+
/* write a device register - 1 byte */
/* Convenience wrapper: write the single byte *byte to 'regaddr' on
 * function 'func'.  Returns SUCCESS or the command status.
 */
static int
bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
{
	int status;
	uint32 cmd_arg;
	uint32 dstatus;
	/* Widen to 32 bits because bcmspi_cmd_issue() takes uint32 data. */
	uint32 data = (uint32)(*byte);

	cmd_arg = 0;
	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
	cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);

	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
	sd_trace(("%s: func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, func,
	         regaddr, data));

	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma,
	                              cmd_arg, &data, 1)) != SUCCESS) {
		return status;
	}

	bcmspi_cmd_getdstatus(sd, &dstatus);
	if (dstatus)
		sd_trace(("dstatus =0x%x\n", dstatus));

	return SUCCESS;
}
+
/* Return the device status word cached by the most recent command
 * issued through bcmspi_cmd_issue().
 */
void
bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
{
	*dstatus_buffer = sd->card_dstatus;
}
+
/* 'data' is of type uint32 whereas other buffers are of type uint8 */
/* Build a gSPI frame for 'cmd_arg', clock it out together with any
 * write payload, and capture the response payload and trailing
 * 32-bit device-status word (cached in sd->card_dstatus).
 *
 * NOTE(review): 'use_dma' is not referenced anywhere in this body.
 * Returns SUCCESS, ERROR (bad wordlen), or the bcmspi_update_stats()
 * result.
 */
static int
bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
                uint32 *data, uint32 datalen)
{
	uint32	i, j;
	uint8	resp_delay = 0;
	int	err = SUCCESS;
	uint32	hostlen;
	uint32 spilen = 0;
	uint32 dstatus_idx = 0;
	uint16 templen, buslen, len, *ptr = NULL;

	sd_trace(("spi cmd = 0x%x\n", cmd_arg));

	/* Dword mode changes where the status word lands and forces F2
	 * write lengths up to a multiple of 16 bytes.
	 */
	if (DWORDMODE_ON) {
		spilen = GFIELD(cmd_arg, SPI_LEN);
		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) ||
		    (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1))
			dstatus_idx = spilen * 3;

		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
			spilen = spilen << 2;
			dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0;
			/* convert len to mod16 size */
			spilen = ROUNDUP(spilen, 16);
			cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
		}
	}

	/* Set up and issue the SPI command.  MSByte goes out on bus first.  Increase datalen
	 * according to the wordlen mode(16/32bit) the device is in.
	 */
	if (sd->wordlen == 4) { /* 32bit spid */
		*(uint32 *)spi_outbuf = bcmswap32(cmd_arg);
		if (datalen & 0x3)
			datalen += (4 - (datalen & 0x3));
	} else if (sd->wordlen == 2) { /* 16bit spid */
		*(uint16 *)spi_outbuf = bcmswap16(cmd_arg & 0xffff);
		*(uint16 *)&spi_outbuf[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16);
		if (datalen & 0x1)
			datalen++;
		if (datalen < 4)
			datalen = ROUNDUP(datalen, 4);
	} else {
		sd_err(("Host is %d bit spid, could not create SPI command.\n",
			8 * sd->wordlen));
		return ERROR;
	}

	/* for Write, put the data into the output buffer */
	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
		/* We send len field of hw-header always a mod16 size, both from host and dongle */
		if (DWORDMODE_ON) {
			if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) {
				/* Patch the hw-header length in place; it is
				 * restored after the transfer (see below).
				 */
				ptr = (uint16 *)&data[0];
				templen = *ptr;
				/* ASSERT(*ptr == ~*(ptr + 1)); */
				templen = ROUNDUP(templen, 16);
				*ptr = templen;
				sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1))));
			}
		}

		if (datalen != 0) {
			for (i = 0; i < datalen/4; i++) {
				if (sd->wordlen == 4) { /* 32bit spid */
					*(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
						bcmswap32(data[i]);
				} else if (sd->wordlen == 2) { /* 16bit spid */
					*(uint16 *)&spi_outbuf[i * 4 + CMDLEN] =
						bcmswap16(data[i] & 0xffff);
					*(uint16 *)&spi_outbuf[i * 4 + CMDLEN + 2] =
						bcmswap16((data[i] & 0xffff0000) >> 16);
				}
			}
		}
	}

	/* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
	if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) {
		int func = GFIELD(cmd_arg, SPI_FUNCTION);
		switch (func) {
			case 0:
				resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
				break;
			case 1:
				resp_delay = F1_RESPONSE_DELAY;
				break;
			case 2:
				resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
				break;
			default:
				ASSERT(0);
				break;
		}
		/* Program response delay */
	        bcmspi_prog_resp_delay(sd, func, resp_delay);
	}

	/* +4 for cmd and +4 for dstatus */
	hostlen = datalen + 8 + resp_delay;
	hostlen += dstatus_idx;
	/* NOTE(review): this adds 4 even when hostlen is already 4-byte
	 * aligned ((hostlen & 0x3) == 0) — confirm whether the extra pad
	 * word is intentional.
	 */
	hostlen += (4 - (hostlen & 0x3));
	spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);

	/* for Read, get the data into the input buffer */
	if (datalen != 0) {
		if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
			for (j = 0; j < datalen/4; j++) {
				if (sd->wordlen == 4) { /* 32bit spid */
					data[j] = bcmswap32(*(uint32 *)&spi_inbuf[j * 4 +
					            CMDLEN + resp_delay]);
				} else if (sd->wordlen == 2) { /* 16bit spid */
					data[j] = (bcmswap16(*(uint16 *)&spi_inbuf[j * 4 +
					            CMDLEN + resp_delay])) |
					         ((bcmswap16(*(uint16 *)&spi_inbuf[j * 4 +
					            CMDLEN + resp_delay + 2])) << 16);
				}
			}

			if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
				ptr = (uint16 *)&data[0];
				templen = *ptr;
				buslen = len = ~(*(ptr + 1));
				buslen = ROUNDUP(buslen, 16);
				/* populate actual len in hw-header */
				if (templen == buslen)
					*ptr = len;
			}
		}
	}

	/* Restore back the len field of the hw header */
	if (DWORDMODE_ON) {
		if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
		    (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
			ptr = (uint16 *)&data[0];
			*ptr = (uint16)(~*(ptr+1));
		}
	}

	dstatus_idx += (datalen + CMDLEN + resp_delay);
	/* Last 4bytes are dstatus.  Device is configured to return status bits. */
	if (sd->wordlen == 4) { /* 32bit spid */
		sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf[dstatus_idx]);
	} else if (sd->wordlen == 2) { /* 16bit spid */
		sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx]) |
		                   (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx + 2]) << 16));
	} else {
		sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
			8 * sd->wordlen));
		return ERROR;
	}
	if (sd->card_dstatus == 0xffffffff) {
		sd_err(("looks like not a GSPI device or device is not powered.\n"));
	}

	err = bcmspi_update_stats(sd, cmd_arg);

	return err;

}
+
/* Buffer transfer to/from the card.  'rw' selects SDIOH_READ or write,
 * 'fifo' selects fixed-address FIFO access (F1 only), 'nbytes' must
 * not exceed the function's block size.  For F2 writes, waits for the
 * device's RX FIFO to become ready before issuing the command.
 * Returns SUCCESS, ERROR (F2 FIFO never ready) or the command status.
 */
static int
bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
                uint32 addr, int nbytes, uint32 *data)
{
	int status;
	uint32 cmd_arg;
	bool write = rw == SDIOH_READ ? 0 : 1;
	uint retries = 0;

	bool enable;
	uint32	spilen;

	cmd_arg = 0;

	ASSERT(nbytes);
	ASSERT(nbytes <= sd->client_block_size[func]);

	if (write) sd->t_cnt++; else sd->r_cnt++;

	if (func == 2) {
		/* Frame len check limited by gSPI. */
		if ((nbytes > 2000) && write) {
			sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
		}
		/* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
		/* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
		if (write) {
			uint32 dstatus;
			/* check F2 ready with cached one */
			bcmspi_cmd_getdstatus(sd, &dstatus);
			if ((dstatus & STATUS_F2_RX_READY) == 0) {
				/* Poll the live status register until ready
				 * or the retry budget is exhausted.
				 */
				retries = WAIT_F2RXFIFORDY;
				enable = 0;
				while (retries-- && !enable) {
					OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
					bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
					                   &dstatus);
					if (dstatus & STATUS_F2_RX_READY)
						enable = TRUE;
				}
				if (!enable) {
					struct spierrstats_t *spierrstats = &sd->spierrstats;
					spierrstats->f2rxnotready++;
					sd_err(("F2 FIFO is not ready to receive data.\n"));
					return ERROR;
				}
				sd_trace(("No of retries on F2 ready %d\n",
					(WAIT_F2RXFIFORDY - retries)));
			}
		}
	}

	/* F2 transfers happen on 0 addr */
	addr = (func == 2) ? 0 : addr;

	/* In pio mode buffer is read using fixed address fifo in func 1 */
	if ((func == 1) && (fifo))
		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
	else
		cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);

	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
	spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
	if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
		/* convert len to mod4 size */
		spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
		/* In dword mode SPI_LEN carries the word count, not bytes. */
		cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
	} else
		cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);

	if ((func == 2) && (fifo == 1)) {
		sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
		          __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
		          addr, nbytes, sd->r_cnt, sd->t_cnt));
	}

	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
	sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
	         __FUNCTION__, write ? "Wd" : "Rd", func, "INCR",
	         addr, nbytes, sd->r_cnt, sd->t_cnt));


	if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg,
	     data, nbytes)) != SUCCESS) {
		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
			(write ? "write" : "read")));
		return status;
	}

	/* gSPI expects that hw-header-len is equal to spi-command-len */
	if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
		ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
		ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
	}

	if ((nbytes > 2000) && !write) {
		sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
	}

	return SUCCESS;
}
+
/* Reset and re-initialize the device */
int
sdioh_sdio_reset(sdioh_info_t *si)
{
	/* Mark the card uninitialized so the full client bring-up runs. */
	si->card_init_done = FALSE;
	return bcmspi_client_init(si);
}
diff --git a/drivers/net/wireless/libra/Makefile b/drivers/net/wireless/libra/Makefile
new file mode 100644
index 0000000..3c606ba
--- /dev/null
+++ b/drivers/net/wireless/libra/Makefile
@@ -0,0 +1,14 @@
+
+# Makefile for wlan sdio if driver
+
+librasdioif-objs += libra_sdioif.o
+
+ifdef CONFIG_ARCH_MSM8X60
+	librasdioif-objs += qcomwlan_pwrif.o
+endif
+
+ifdef CONFIG_ARCH_MSM7X27A
+	librasdioif-objs += qcomwlan7x27a_pwrif.o
+endif
+
+obj-$(CONFIG_LIBRA_SDIOIF) += librasdioif.o
diff --git a/drivers/net/wireless/libra/libra_sdioif.c b/drivers/net/wireless/libra/libra_sdioif.c
new file mode 100644
index 0000000..3955642
--- /dev/null
+++ b/drivers/net/wireless/libra/libra_sdioif.c
@@ -0,0 +1,481 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/libra_sdioif.h>
+#include <linux/delay.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
/* Libra SDIO function device */
static struct sdio_func *libra_sdio_func;
static struct mmc_host *libra_mmc_host;		/* host the Libra card sits on */
static int libra_mmc_host_index;		/* cached host->index from probe */

/* SDIO Card ID / Device ID */
static unsigned short  libra_sdio_card_id;

/* Optional suspend/resume callbacks registered by the function driver */
static suspend_handler_t *libra_suspend_hldr;
static resume_handler_t *libra_resume_hldr;
+
+/**
+ * libra_sdio_configure() - Function to configure the SDIO device param
+ * @libra_sdio_rxhandler    Rx handler
+ * @func_drv_fn             Function driver function for special setup
+ * @funcdrv_timeout         Function Enable timeout
+ * @blksize                 Block size
+ *
+ * Configure SDIO device, enable function and set block size
+ */
+int libra_sdio_configure(sdio_irq_handler_t libra_sdio_rxhandler,
+	void  (*func_drv_fn)(int *status),
+	unsigned int funcdrv_timeout, unsigned int blksize)
+{
+	int err_ret = 0;
+	struct sdio_func *func = libra_sdio_func;
+
+	if (libra_sdio_func == NULL) {
+		printk(KERN_ERR "%s: Error SDIO card not detected\n", __func__);
+		goto cfg_error;
+	}
+
+	sdio_claim_host(func);
+
+	/* Currently block sizes are set here. */
+	func->max_blksize = blksize;
+	if (sdio_set_block_size(func, blksize)) {
+		printk(KERN_ERR "%s: Unable to set the block size.\n",
+				__func__);
+		sdio_release_host(func);
+		goto cfg_error;
+	}
+
+	/* Function driver specific configuration. */
+	if (func_drv_fn) {
+		(*func_drv_fn)(&err_ret);
+		if (err_ret) {
+			printk(KERN_ERR "%s: function driver provided configure function error=%d\n",
+				__func__, err_ret);
+			sdio_release_host(func);
+			goto cfg_error;
+		}
+	}
+
+	/* We set this based on the function card. */
+	func->enable_timeout = funcdrv_timeout;
+	err_ret = sdio_enable_func(func);
+	if (err_ret != 0) {
+		printk(KERN_ERR "%s: Unable to enable function %d\n",
+				__func__, err_ret);
+		sdio_release_host(func);
+		goto cfg_error;
+	}
+
+	if (sdio_claim_irq(func, libra_sdio_rxhandler)) {
+		sdio_disable_func(func);
+		printk(KERN_ERR "%s: Unable to claim irq.\n", __func__);
+		sdio_release_host(func);
+		goto cfg_error;
+	}
+
+	sdio_release_host(func);
+
+	return 0;
+
+cfg_error:
+	return -1;
+
+}
+EXPORT_SYMBOL(libra_sdio_configure);
+
/* Register the function driver's suspend/resume callbacks; they are
 * stored in file-scope pointers for later use.  Always returns 0.
 */
int libra_sdio_configure_suspend_resume(
		suspend_handler_t *libra_sdio_suspend_hdlr,
		resume_handler_t *libra_sdio_resume_hdlr)
{
	libra_suspend_hldr = libra_sdio_suspend_hdlr;
	libra_resume_hldr = libra_sdio_resume_hdlr;
	return 0;
}
EXPORT_SYMBOL(libra_sdio_configure_suspend_resume);
+
/*
 * libra_sdio_deconfigure() - Function to reset the SDIO device param
 *
 * Releases the IRQ and disables the function under the claimed host.
 * NOTE(review): the guard checks the global libra_sdio_func but the
 * operations use the caller-supplied 'func' — confirm callers always
 * pass the same device.
 */
void libra_sdio_deconfigure(struct sdio_func *func)
{
	if (NULL == libra_sdio_func)
		return;

	sdio_claim_host(func);
	sdio_release_irq(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}
EXPORT_SYMBOL(libra_sdio_deconfigure);
+
+int libra_enable_sdio_irq(struct sdio_func *func, u8 enable)
+{
+	if (libra_mmc_host && libra_mmc_host->ops &&
+			libra_mmc_host->ops->enable_sdio_irq) {
+		libra_mmc_host->ops->enable_sdio_irq(libra_mmc_host, enable);
+		return 0;
+	}
+
+	printk(KERN_ERR "%s: Could not enable disable irq\n", __func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_enable_sdio_irq);
+
+int libra_disable_sdio_irq_capability(struct sdio_func *func, u8 disable)
+{
+	if (libra_mmc_host) {
+		if (disable)
+			libra_mmc_host->caps &= ~MMC_CAP_SDIO_IRQ;
+		else
+			libra_mmc_host->caps |= MMC_CAP_SDIO_IRQ;
+		return 0;
+	}
+	printk(KERN_ERR "%s: Could not change sdio capabilities to polling\n",
+			__func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_disable_sdio_irq_capability);
+
/*
 * libra_sdio_release_irq() - Function to release IRQ
 *
 * No-op if the Libra function device has not been probed.
 * NOTE(review): guards on the global libra_sdio_func but releases the
 * IRQ of the caller-supplied 'func'; also called without claiming the
 * host here — confirm callers hold the host claim.
 */
void libra_sdio_release_irq(struct sdio_func *func)
{
	if (NULL == libra_sdio_func)
		return;

	sdio_release_irq(func);
}
EXPORT_SYMBOL(libra_sdio_release_irq);
+
/*
 * libra_sdio_disable_func() - Function to disable sdio func
 *
 * No-op if the Libra function device has not been probed.
 * NOTE(review): guards on the global libra_sdio_func but disables the
 * caller-supplied 'func' — confirm callers pass the same device.
 */
void libra_sdio_disable_func(struct sdio_func *func)
{
	if (NULL == libra_sdio_func)
		return;

	sdio_disable_func(func);
}
EXPORT_SYMBOL(libra_sdio_disable_func);
+
/*
 * Return the SDIO Function device
 * (NULL until libra_sdio_probe() has run, or after remove).
 */
struct sdio_func *libra_getsdio_funcdev(void)
{
	return libra_sdio_func;
}
EXPORT_SYMBOL(libra_getsdio_funcdev);
+
/*
 * Set function driver as the private data for the function device
 * (no-op if the Libra function device has not been probed).
 */
void libra_sdio_setprivdata(struct sdio_func *sdio_func_dev,
		void *padapter)
{
	if (NULL == libra_sdio_func)
		return;

	sdio_set_drvdata(sdio_func_dev, padapter);
}
EXPORT_SYMBOL(libra_sdio_setprivdata);
+
/*
 * Return private data of the function device.
 */
void *libra_sdio_getprivdata(struct sdio_func *sdio_func_dev)
{
	return sdio_get_drvdata(sdio_func_dev);
}
EXPORT_SYMBOL(libra_sdio_getprivdata);
+
/*
 * Function driver claims the SDIO device
 *
 * Re-entrant per PID: if *curr_claimed already equals current_pid only
 * the claim count is bumped; otherwise the host is claimed and the PID
 * recorded.  NOTE(review): sdio_claim_host() is called unconditionally
 * on the non-matching path despite the "if not locked" comment —
 * sdio_claim_host() itself blocks until the host is free.
 */
void libra_claim_host(struct sdio_func *sdio_func_dev,
		pid_t *curr_claimed, pid_t current_pid, atomic_t *claim_count)
{
	if (NULL == libra_sdio_func)
		return;

	if (*curr_claimed == current_pid) {
		atomic_inc(claim_count);
		return;
	}

	/* Go ahead and claim the host if not locked by anybody. */
	sdio_claim_host(sdio_func_dev);

	*curr_claimed = current_pid;
	atomic_inc(claim_count);

}
EXPORT_SYMBOL(libra_claim_host);
+
/*
 * Function driver releases the SDIO device
 *
 * Mirror of libra_claim_host(): only the claiming PID may release, and
 * the host is released when the claim count drops to zero.
 */
void libra_release_host(struct sdio_func *sdio_func_dev,
		pid_t *curr_claimed, pid_t current_pid, atomic_t *claim_count)
{

	if (NULL == libra_sdio_func)
		return;

	if (*curr_claimed != current_pid) {
		/* Dont release  */
		return;
	}

	atomic_dec(claim_count);
	if (atomic_read(claim_count) == 0) {
		*curr_claimed = 0;
		sdio_release_host(sdio_func_dev);
	}
}
EXPORT_SYMBOL(libra_release_host);
+
+void libra_sdiocmd52(struct sdio_func *sdio_func_dev, unsigned int addr,
+	u8 *byte_var, int write, int *err_ret)
+{
+	if (write)
+		sdio_writeb(sdio_func_dev, byte_var[0], addr, err_ret);
+	else
+		byte_var[0] = sdio_readb(sdio_func_dev, addr, err_ret);
+}
+EXPORT_SYMBOL(libra_sdiocmd52);
+
/* Streaming read of 'count' bytes from fixed address 'addr' into dst.
 * NOTE(review): sdio_readsb() returns int, but this wrapper returns
 * u8 — negative error codes are truncated; confirm callers only test
 * for zero/non-zero.
 */
u8 libra_sdio_readsb(struct sdio_func *func, void *dst,
	unsigned int addr, int count)
{
	return sdio_readsb(func, dst, addr, count);
}
EXPORT_SYMBOL(libra_sdio_readsb);
+
/* Incrementing-address read of 'count' bytes from 'addr' into dst.
 * Thin wrapper over sdio_memcpy_fromio(); returns its result.
 */
int libra_sdio_memcpy_fromio(struct sdio_func *func,
		void *dst, unsigned int addr, int count)
{
	return sdio_memcpy_fromio(func, dst, addr, count);
}
EXPORT_SYMBOL(libra_sdio_memcpy_fromio);
+
/* Streaming write of 'count' bytes from src to fixed address 'addr'.
 * Thin wrapper over sdio_writesb(); returns its result.
 */
int libra_sdio_writesb(struct sdio_func *func,
		unsigned int addr, void *src, int count)
{
	return sdio_writesb(func, addr, src, count);
}
EXPORT_SYMBOL(libra_sdio_writesb);
+
/* Incrementing-address write of 'count' bytes from src to 'addr'.
 * Thin wrapper over sdio_memcpy_toio(); returns its result.
 */
int libra_sdio_memcpy_toio(struct sdio_func *func,
	unsigned int addr, void *src, int count)
{
	return sdio_memcpy_toio(func, addr, src, count);
}
EXPORT_SYMBOL(libra_sdio_memcpy_toio);
+
+/*
+ * libra_detect_card_change() - ask the MMC core to rescan the WLAN slot
+ *
+ * Validates that the host cached at probe time is still the same mmc_host
+ * before kicking a rescan.  Returns 0 on success, -EINVAL otherwise.
+ */
+int libra_detect_card_change(void)
+{
+	if (libra_mmc_host &&
+	    !strcmp(libra_mmc_host->class_dev.class->name, "mmc_host") &&
+	    libra_mmc_host_index == libra_mmc_host->index) {
+		mmc_detect_change(libra_mmc_host, 0);
+		return 0;
+	}
+
+	printk(KERN_ERR "%s: Could not trigger card change\n", __func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_detect_card_change);
+
+/*
+ * libra_sdio_enable_polling() - re-enable card polling on the WLAN host
+ *
+ * Turns MMC_CAP_NEEDS_POLL back on (probe clears it) and kicks a rescan
+ * so a re-inserted card is detected.  Returns 0 on success or -EINVAL
+ * when the cached host is stale or missing.
+ */
+int libra_sdio_enable_polling(void)
+{
+	if (libra_mmc_host) {
+		if (!strcmp(libra_mmc_host->class_dev.class->name, "mmc_host")
+			&& (libra_mmc_host_index == libra_mmc_host->index)) {
+			libra_mmc_host->caps |= MMC_CAP_NEEDS_POLL;
+			mmc_detect_change(libra_mmc_host, 0);
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "%s: Could not trigger SDIO scan\n", __func__);
+	/* return a real errno instead of a bare -1, consistent with
+	 * libra_detect_card_change()
+	 */
+	return -EINVAL;
+}
+EXPORT_SYMBOL(libra_sdio_enable_polling);
+
+/*
+ * libra_sdio_set_clock() - force the SDIO bus clock to @clk_freq
+ * @func: SDIO function on the target host
+ * @clk_freq: desired bus clock in Hz
+ *
+ * Writes the frequency into the host ios and asks the controller to apply
+ * it.  No claiming is done here; the caller must hold the host claim.
+ * (Reindented with tabs per kernel coding style.)
+ */
+void libra_sdio_set_clock(struct sdio_func *func, unsigned int clk_freq)
+{
+	struct mmc_host *host = func->card->host;
+
+	host->ios.clock = clk_freq;
+	host->ops->set_ios(host, &host->ios);
+}
+EXPORT_SYMBOL(libra_sdio_set_clock);
+
+/*
+ * libra_sdio_get_card_id() - report the SDIO device ID cached at probe
+ * @func: unused; kept in the signature for ABI compatibility
+ * @card_id: out-parameter receiving the cached device ID (may be NULL)
+ */
+void libra_sdio_get_card_id(struct sdio_func *func, unsigned short *card_id)
+{
+	if (card_id)
+		*card_id = libra_sdio_card_id;
+}
+EXPORT_SYMBOL(libra_sdio_get_card_id);
+
+/*
+ * libra_sdio_probe() - SDIO core matched a Libra/Volans card
+ *
+ * Caches host, host index, function and device ID in the file-level
+ * globals consumed by the exported helpers, then turns off the card
+ * polling that was enabled to detect insertion.  Always returns 0.
+ */
+static int libra_sdio_probe(struct sdio_func *func,
+		const struct sdio_device_id *sdio_dev_id)
+{
+	libra_mmc_host = func->card->host;
+	libra_mmc_host_index = libra_mmc_host->index;
+	libra_sdio_func = func;
+	libra_sdio_card_id = sdio_dev_id->device;
+
+	printk(KERN_INFO "%s: success with block size of %d device_id=0x%x\n",
+		__func__,
+		func->cur_blksize,
+		sdio_dev_id->device);
+
+	/* Turn off SDIO polling from now on */
+	libra_mmc_host->caps &= ~MMC_CAP_NEEDS_POLL;
+	return 0;
+}
+
+/* Card removal: invalidate the cached function pointer.
+ * NOTE(review): libra_mmc_host is deliberately left set so a later
+ * libra_detect_card_change()/libra_sdio_enable_polling() can still
+ * trigger a rescan - confirm.
+ */
+static void libra_sdio_remove(struct sdio_func *func)
+{
+	libra_sdio_func = NULL;
+
+	printk(KERN_INFO "%s : Module removed.\n", __func__);
+}
+
+#ifdef CONFIG_PM
+/*
+ * libra_sdio_suspend() - PM suspend hook for the Libra SDIO function
+ *
+ * Asks the host to keep power to the card across suspend, disables the
+ * SDIO IRQ, and lets the registered client suspend handler veto the
+ * suspend.  On success it additionally votes for wake-on-SDIO-IRQ.
+ */
+static int libra_sdio_suspend(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+	int ret = 0;
+
+	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
+	if (ret) {
+		printk(KERN_ERR "%s: Error Host doesn't support the keep power capability\n" ,
+			__func__);
+		return ret;
+	}
+	if (libra_suspend_hldr) {
+		/* Disable SDIO IRQ when driver is being suspended */
+		libra_enable_sdio_irq(func, 0);
+		ret = libra_suspend_hldr(func);
+		if (ret) {
+			printk(KERN_ERR
+			"%s: Libra driver is not able to suspend\n" , __func__);
+			/* Error - Restore SDIO IRQ */
+			libra_enable_sdio_irq(func, 1);
+			return ret;
+		}
+	}
+
+	return sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+}
+
+/*
+ * libra_sdio_resume() - PM resume hook; undoes what suspend did
+ */
+static int libra_sdio_resume(struct device *dev)
+{
+	struct sdio_func *func = dev_to_sdio_func(dev);
+
+	if (libra_resume_hldr) {
+		libra_resume_hldr(func);
+		/* Restore SDIO IRQ */
+		libra_enable_sdio_irq(func, 1);
+	}
+
+	return 0;
+}
+#else
+/* dev_pm_ops members are function pointers: use NULL, not 0 */
+#define libra_sdio_suspend NULL
+#define libra_sdio_resume NULL
+#endif
+
+/* SDIO IDs of the WLAN cards this driver binds to (Libra 1.0, Volans 2.0) */
+static struct sdio_device_id libra_sdioid[] = {
+	{.class = 0, .vendor = LIBRA_MAN_ID,  .device = LIBRA_REV_1_0_CARD_ID},
+	{.class = 0, .vendor = VOLANS_MAN_ID, .device = VOLANS_REV_2_0_CARD_ID},
+	{}
+};
+
+static const struct dev_pm_ops libra_sdio_pm_ops = {
+	.suspend = libra_sdio_suspend,
+	.resume = libra_sdio_resume,
+};
+
+static struct sdio_driver libra_sdiofn_driver = {
+	.name      = "libra_sdiofn",
+	.id_table  = libra_sdioid,
+	.probe     = libra_sdio_probe,
+	.remove    = libra_sdio_remove,
+	.drv.pm    = &libra_sdio_pm_ops,
+};
+
+/*
+ * libra_sdioif_init() - module init: reset cached state, register driver
+ *
+ * Propagates the sdio_register_driver() result instead of unconditionally
+ * reporting success as the previous code did.
+ */
+static int __init libra_sdioif_init(void)
+{
+	int ret;
+
+	libra_sdio_func = NULL;
+	libra_mmc_host = NULL;
+	libra_mmc_host_index = -1;
+	libra_suspend_hldr = NULL;
+	libra_resume_hldr = NULL;
+
+	ret = sdio_register_driver(&libra_sdiofn_driver);
+	if (ret) {
+		printk(KERN_ERR "%s: sdio_register_driver failed (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	printk(KERN_INFO "%s: Loaded Successfully\n", __func__);
+
+	return 0;
+}
+
+/*
+ * libra_sdioif_exit() - module exit
+ *
+ * Kicks a card-change rescan so the SDIO core invokes our remove hook,
+ * polls up to 3 x 500 ms for libra_sdio_func to be cleared by it, then
+ * unregisters the driver and wipes the cached host state.
+ */
+static void __exit libra_sdioif_exit(void)
+{
+	unsigned int attempts = 0;
+
+	if (!libra_detect_card_change()) {
+		do {
+			++attempts;
+			msleep(500);
+		} while (libra_sdio_func != NULL && attempts < 3);
+	}
+
+	if (libra_sdio_func != NULL)
+		printk(KERN_ERR "%s: Card removal not detected\n", __func__);
+
+	sdio_unregister_driver(&libra_sdiofn_driver);
+
+	libra_sdio_func = NULL;
+	libra_mmc_host = NULL;
+	libra_mmc_host_index = -1;
+
+	printk(KERN_INFO "%s: Unloaded Successfully\n", __func__);
+}
+
+module_init(libra_sdioif_init);
+module_exit(libra_sdioif_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("WLAN SDIODriver");
diff --git a/drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c b/drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c
new file mode 100644
index 0000000..ca2680f
--- /dev/null
+++ b/drivers/net/wireless/libra/qcomwlan7x27a_pwrif.c
@@ -0,0 +1,172 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <mach/vreg.h>
+#include <linux/gpio.h>
+#include <mach/rpc_pmapp.h>
+#include <linux/err.h>
+#include <linux/qcomwlan7x27a_pwrif.h>
+
+#define WLAN_GPIO_EXT_POR_N     134
+
+static const char *id = "WLAN";
+
+/* Indices into vreg_info[]; the order must match the table below.
+ * WLAN_VREG_TCXO_L11 (index 2) is special-cased in chip_power_qrf6285().
+ */
+enum {
+	WLAN_VREG_L17 = 0,
+	WLAN_VREG_S3,
+	WLAN_VREG_TCXO_L11,
+	WLAN_VREG_L19,
+	WLAN_VREG_L5,
+	WLAN_VREG_L6
+};
+
+/* Per-supply description used by the power sequencing code */
+struct wlan_vreg_info {
+	const char *vreg_id;	/* vreg framework supply name */
+	unsigned int vreg_level;	/* level for vreg_set_level() - presumably mV, confirm */
+	unsigned int pmapp_id;	/* id used for pmapp pin-control votes */
+	unsigned int is_vreg_pin_controlled;	/* 1: vote via pmapp, 0: vreg_enable/disable */
+	struct vreg *vreg;	/* handle filled in at runtime by vreg_get() */
+};
+
+
+/* NOTE(review): every entry has is_vreg_pin_controlled == 0, so the pmapp
+ * pin-control branches in chip_power_qrf6285() are currently unused - confirm.
+ */
+static struct wlan_vreg_info vreg_info[] = {
+	{"bt", 3050, 56, 0, NULL},
+	{"msme1", 1800, 2, 0, NULL},
+	{"wlan_tcx0", 1800, 53, 0, NULL},
+	{"wlan4", 1200, 57, 0, NULL},
+	{"wlan2", 1350, 45, 0, NULL},
+	{"wlan3", 1200, 51, 0, NULL} };
+
+/*
+ * chip_power_qrf6285() - power the QRF6285 WLAN chip up or down
+ * @on: true to power up, false to power down
+ *
+ * Power-up: request the external POR GPIO and drive it high, then set each
+ * regulator in vreg_info[] to its level and enable it (pin-controlled
+ * supplies are voted through pmapp instead); once the TCXO LDO is up, the
+ * TCXO buffer is made slave to WLAN_CLK_PWR_REQ.
+ * Power-down: drop and free the POR GPIO, then disable/unvote the supplies.
+ *
+ * Returns 0 on success or a negative errno; on a power-up failure every
+ * supply enabled so far is disabled again.
+ */
+int chip_power_qrf6285(bool on)
+{
+	int rc = 0, ret = 0, index = 0;
+
+	if (on) {
+		rc = gpio_request(WLAN_GPIO_EXT_POR_N, "WLAN_DEEP_SLEEP_N");
+
+		if (rc) {
+			pr_err("WLAN reset GPIO %d request failed %d\n",
+			WLAN_GPIO_EXT_POR_N, rc);
+			goto fail;
+		}
+		rc = gpio_direction_output(WLAN_GPIO_EXT_POR_N, 1);
+		if (rc < 0) {
+			pr_err("WLAN reset GPIO %d set direction failed %d\n",
+			WLAN_GPIO_EXT_POR_N, rc);
+			goto fail_gpio_dir_out;
+		}
+	} else {
+		gpio_set_value_cansleep(WLAN_GPIO_EXT_POR_N, 0);
+		gpio_free(WLAN_GPIO_EXT_POR_N);
+	}
+
+
+	for (index = 0; index < ARRAY_SIZE(vreg_info); index++) {
+		vreg_info[index].vreg = vreg_get(NULL,
+						vreg_info[index].vreg_id);
+		if (IS_ERR(vreg_info[index].vreg)) {
+			pr_err("%s:%s vreg get failed %ld\n",
+				__func__, vreg_info[index].vreg_id,
+				PTR_ERR(vreg_info[index].vreg));
+			rc = PTR_ERR(vreg_info[index].vreg);
+			if (on)
+				goto vreg_fail;
+			else
+				continue;
+		}
+		if (on) {
+			rc = vreg_set_level(vreg_info[index].vreg,
+					 vreg_info[index].vreg_level);
+			if (rc) {
+				pr_err("%s:%s vreg set level failed %d\n",
+					__func__, vreg_info[index].vreg_id, rc);
+				goto vreg_fail;
+			}
+			if (vreg_info[index].is_vreg_pin_controlled) {
+				rc = pmapp_vreg_pincntrl_vote(id,
+					 vreg_info[index].pmapp_id,
+					 PMAPP_CLOCK_ID_A0, 1);
+				if (rc) {
+					pr_err("%s:%s pmapp_vreg_pincntrl_vote"
+						" for enable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+					goto vreg_fail;
+				}
+			} else {
+				rc = vreg_enable(vreg_info[index].vreg);
+				if (rc) {
+					pr_err("%s:%s vreg enable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+					goto vreg_fail;
+				}
+			}
+
+			if (WLAN_VREG_TCXO_L11 == index) {
+				/*
+				 * Configure TCXO to be slave to
+				 * WLAN_CLK_PWR_REQ
+				 */
+				rc = pmapp_clock_vote(id, PMAPP_CLOCK_ID_A0,
+						PMAPP_CLOCK_VOTE_PIN_CTRL);
+				if (rc) {
+					pr_err("%s: Configuring TCXO to Pin"
+					" controllable failed %d\n",
+							 __func__, rc);
+					goto vreg_clock_vote_fail;
+				}
+			}
+
+		} else {
+
+			if (vreg_info[index].is_vreg_pin_controlled) {
+				rc = pmapp_vreg_pincntrl_vote(id,
+						 vreg_info[index].pmapp_id,
+						 PMAPP_CLOCK_ID_A0, 0);
+				if (rc) {
+					pr_err("%s:%s pmapp_vreg_pincntrl_vote"
+						" for disable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+				}
+			} else {
+				rc = vreg_disable(vreg_info[index].vreg);
+				if (rc) {
+					pr_err("%s:%s vreg disable failed %d\n",
+						__func__,
+						vreg_info[index].vreg_id, rc);
+				}
+			}
+		}
+	}
+	return 0;
+vreg_fail:
+	index--;
+vreg_clock_vote_fail:
+	/*
+	 * Unwind every supply enabled so far - including index 0, which the
+	 * previous "while (index > 0)" loop missed - without clobbering the
+	 * original error code held in rc.
+	 */
+	while (index >= 0) {
+		ret = vreg_disable(vreg_info[index].vreg);
+		if (ret) {
+			pr_err("%s:%s vreg disable failed %d\n",
+				__func__, vreg_info[index].vreg_id, ret);
+		}
+		index--;
+	}
+	if (!on)
+		goto fail;
+fail_gpio_dir_out:
+	gpio_free(WLAN_GPIO_EXT_POR_N);
+fail:
+	return rc;
+}
+EXPORT_SYMBOL(chip_power_qrf6285);
diff --git a/drivers/net/wireless/libra/qcomwlan_pwrif.c b/drivers/net/wireless/libra/qcomwlan_pwrif.c
new file mode 100644
index 0000000..bb5e135
--- /dev/null
+++ b/drivers/net/wireless/libra/qcomwlan_pwrif.c
@@ -0,0 +1,256 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qcomwlan_pwrif.h>
+
+#define GPIO_WLAN_DEEP_SLEEP_N  230
+#define WLAN_RESET_OUT          1
+#define WLAN_RESET              0
+
+static const char *id = "WLAN";
+
+/**
+ * vos_chip_power_qrf8615() - WLAN Power Up Seq for WCN1314 rev 2.0 on QRF 8615
+ * @on - Turn WLAN ON/OFF (1 or 0)
+ *
+ * Power up/down WLAN by turning on/off various regs and asserting/deasserting
+ * Power-on-reset pin. Also, put XO A0 buffer as slave to wlan_clk_pwr_req while
+ * turning ON WLAN and vice-versa.
+ *
+ * This function returns 0 on success or a negative errno on failure.
+ */
+int vos_chip_power_qrf8615(int on)
+{
+	static char wlan_on;
+	static const char *vregs_qwlan_name[] = {
+		"8058_l20",
+		"8058_l8",
+		"8901_s4",
+		"8901_lvs1",
+		"8901_l0",
+		"8058_s2",
+		"8058_s1",
+	};
+	static const int vregs_qwlan_val_min[] = {
+		1800000,
+		3050000,
+		1225000,
+		0,
+		1200000,
+		1300000,
+		500000,
+	};
+	static const int vregs_qwlan_val_max[] = {
+		1800000,
+		3050000,
+		1225000,
+		0,
+		1200000,
+		1300000,
+		1250000,
+	};
+	static const bool vregs_is_pin_controlled[] = {
+		1,
+		1,
+		0,
+		0,
+		1,
+		1,
+		0,
+	};
+	static struct regulator *vregs_qwlan[ARRAY_SIZE(vregs_qwlan_name)];
+	static struct msm_xo_voter *wlan_clock;
+	int ret, i, rc = 0;
+
+	/* WLAN RESET and CLK settings */
+	if (on && !wlan_on) {
+		/*
+		 * Program U12 GPIO expander pin IO1 to de-assert (drive 0)
+		 * WLAN_EXT_POR_N to put WLAN in reset
+		 */
+		rc = gpio_request(GPIO_WLAN_DEEP_SLEEP_N, "WLAN_DEEP_SLEEP_N");
+		if (rc) {
+			pr_err("WLAN reset GPIO %d request failed\n",
+					GPIO_WLAN_DEEP_SLEEP_N);
+			goto fail;
+		}
+		rc = gpio_direction_output(GPIO_WLAN_DEEP_SLEEP_N,
+				WLAN_RESET_OUT);
+		if (rc < 0) {
+			pr_err("WLAN reset GPIO %d set output direction failed",
+					GPIO_WLAN_DEEP_SLEEP_N);
+			goto fail_gpio_dir_out;
+		}
+
+		/* Configure TCXO to be slave to WLAN_CLK_PWR_REQ */
+		if (wlan_clock == NULL) {
+			wlan_clock = msm_xo_get(MSM_XO_TCXO_A0, id);
+			if (IS_ERR(wlan_clock)) {
+				/*
+				 * Propagate the error (the old code fell
+				 * through with rc == 0 and returned success)
+				 * and clear the stale ERR_PTR so a later
+				 * call retries msm_xo_get().
+				 */
+				rc = PTR_ERR(wlan_clock);
+				pr_err("Failed to get TCXO_A0 voter (%d)\n",
+						rc);
+				wlan_clock = NULL;
+				goto fail_gpio_dir_out;
+			}
+		}
+
+		rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_PIN_CTRL);
+		if (rc < 0) {
+			pr_err("Configuring TCXO to Pin controllable failed"
+					"(%d)\n", rc);
+			goto fail_xo_mode_vote;
+		}
+	} else if (!on && wlan_on) {
+		if (wlan_clock != NULL)
+			msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_OFF);
+		gpio_set_value_cansleep(GPIO_WLAN_DEEP_SLEEP_N, WLAN_RESET);
+		gpio_free(GPIO_WLAN_DEEP_SLEEP_N);
+	}
+
+	/* WLAN VREG settings */
+	for (i = 0; i < ARRAY_SIZE(vregs_qwlan_name); i++) {
+		if (vregs_qwlan[i] == NULL) {
+			vregs_qwlan[i] = regulator_get(NULL,
+					vregs_qwlan_name[i]);
+			if (IS_ERR(vregs_qwlan[i])) {
+				pr_err("regulator get of %s failed (%ld)\n",
+						vregs_qwlan_name[i],
+						PTR_ERR(vregs_qwlan[i]));
+				rc = PTR_ERR(vregs_qwlan[i]);
+				/* drop the ERR_PTR so the next call retries */
+				vregs_qwlan[i] = NULL;
+				goto vreg_get_fail;
+			}
+			if (vregs_qwlan_val_min[i] || vregs_qwlan_val_max[i]) {
+				rc = regulator_set_voltage(vregs_qwlan[i],
+						vregs_qwlan_val_min[i],
+						vregs_qwlan_val_max[i]);
+				if (rc) {
+					pr_err("regulator_set_voltage(%s) failed\n",
+							vregs_qwlan_name[i]);
+					goto vreg_fail;
+				}
+			}
+			/* vote for pin control (if needed) */
+			if (vregs_is_pin_controlled[i]) {
+				rc = regulator_set_mode(vregs_qwlan[i],
+						REGULATOR_MODE_IDLE);
+				if (rc) {
+					pr_err("regulator_set_mode(%s) failed\n",
+							vregs_qwlan_name[i]);
+					goto vreg_fail;
+				}
+			}
+		}
+		if (on && !wlan_on) {
+			rc = regulator_enable(vregs_qwlan[i]);
+			if (rc < 0) {
+				pr_err("vreg %s enable failed (%d)\n",
+						vregs_qwlan_name[i], rc);
+				goto vreg_fail;
+			}
+		} else if (!on && wlan_on) {
+			rc = regulator_disable(vregs_qwlan[i]);
+			if (rc < 0) {
+				pr_err("vreg %s disable failed (%d)\n",
+						vregs_qwlan_name[i], rc);
+				goto vreg_fail;
+			}
+		}
+	}
+	if (on)
+		wlan_on = true;
+	else
+		wlan_on = false;
+	return 0;
+
+vreg_fail:
+	regulator_put(vregs_qwlan[i]);
+	/* clear the freed handle - the array is static, a stale pointer
+	 * would be reused (use-after-free) on the next call
+	 */
+	vregs_qwlan[i] = NULL;
+vreg_get_fail:
+	i--;
+	/* unwind including element 0 (the old "while (i)" skipped it) */
+	while (i >= 0) {
+		ret = !on ? regulator_enable(vregs_qwlan[i]) :
+			regulator_disable(vregs_qwlan[i]);
+		if (ret < 0) {
+			pr_err("vreg %s %s failed (%d) in err path\n",
+					vregs_qwlan_name[i],
+					!on ? "enable" : "disable", ret);
+		}
+		regulator_put(vregs_qwlan[i]);
+		vregs_qwlan[i] = NULL;
+		i--;
+	}
+	if (!on)
+		goto fail;
+fail_xo_mode_vote:
+	msm_xo_put(wlan_clock);
+	/* voter released - force a fresh msm_xo_get() on retry */
+	wlan_clock = NULL;
+fail_gpio_dir_out:
+	gpio_free(GPIO_WLAN_DEEP_SLEEP_N);
+fail:
+	return rc;
+}
+EXPORT_SYMBOL(vos_chip_power_qrf8615);
+
+/**
+ * qcomwlan_pmic_xo_core_force_enable() - Force XO Core of PMIC to be ALWAYS ON
+ * @on - Force XO Core  ON/OFF (1 or 0)
+ *
+ * The XO_CORE controls the XO feeding the TCXO buffers (A0, A1, etc.). WLAN
+ * wants to keep the XO core on even though our buffer A0 is in pin control
+ * because it can take a long time turn the XO back on and warm up the buffers.
+ * This helps in optimizing power in BMPS (power save) mode of WLAN.
+ * The WLAN driver wrapper function takes care that this API is not called
+ * consecutively.
+ *
+ * This function returns 0 on success or a non-zero value on failure.
+ */
+int qcomwlan_pmic_xo_core_force_enable(int on)
+{
+	/* voter is cached across calls; NULL means "not yet acquired" */
+	static struct msm_xo_voter *wlan_ps;
+	int rc = 0;
+
+	if (wlan_ps == NULL) {
+		wlan_ps = msm_xo_get(MSM_XO_CORE, id);
+		if (IS_ERR(wlan_ps)) {
+			/* propagate the error - the old code returned 0
+			 * (success) here - and clear the stale ERR_PTR
+			 */
+			rc = PTR_ERR(wlan_ps);
+			pr_err("Failed to get XO CORE voter (%d)\n", rc);
+			wlan_ps = NULL;
+			goto fail;
+		}
+	}
+
+	if (on)
+		rc = msm_xo_mode_vote(wlan_ps, MSM_XO_MODE_ON);
+	else
+		rc = msm_xo_mode_vote(wlan_ps, MSM_XO_MODE_OFF);
+
+	if (rc < 0) {
+		pr_err("XO Core %s failed (%d)\n",
+			on ? "enable" : "disable", rc);
+		goto fail_xo_mode_vote;
+	}
+	return 0;
+fail_xo_mode_vote:
+	msm_xo_put(wlan_ps);
+	/* voter was released; force a fresh msm_xo_get() next call */
+	wlan_ps = NULL;
+fail:
+	return rc;
+}
+EXPORT_SYMBOL(qcomwlan_pmic_xo_core_force_enable);
+
+
+/**
+ * qcomwlan_freq_change_1p3v_supply() - change the frequency of the 1.3 V
+ * RF supply
+ * @freq - desired switching frequency of the 1.3 V supply
+ *
+ * Thin wrapper forwarding to the RPM regulator driver for PM8058 S2 (the
+ * SMPS feeding the WLAN RF section).
+ *
+ * This function returns 0 on success or a non-zero value on failure.
+ */
+
+int qcomwlan_freq_change_1p3v_supply(enum rpm_vreg_freq freq)
+{
+	return rpm_vreg_set_frequency(RPM_VREG_ID_PM8058_S2, freq);
+}
diff --git a/drivers/net/wireless/wcnss/Makefile b/drivers/net/wireless/wcnss/Makefile
new file mode 100644
index 0000000..d182b6e
--- /dev/null
+++ b/drivers/net/wireless/wcnss/Makefile
@@ -0,0 +1,6 @@
+
+# Makefile for WCNSS WLAN driver
+
+wcnsswlan-objs += wcnss_wlan.o wcnss_riva.o qcomwlan_secif.o
+
+obj-$(CONFIG_WCNSS_WLAN) += wcnsswlan.o
diff --git a/drivers/net/wireless/wcnss/qcomwlan_secif.c b/drivers/net/wireless/wcnss/qcomwlan_secif.c
new file mode 100644
index 0000000..124f387
--- /dev/null
+++ b/drivers/net/wireless/wcnss/qcomwlan_secif.c
@@ -0,0 +1,62 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/qcomwlan_secif.h>
+
+/*
+ * Thin exported wrappers around the kernel crypto API so the WCNSS WLAN
+ * module can use ahash/ablkcipher transforms without linking against the
+ * crypto core directly.  Each simply forwards to the corresponding
+ * crypto_* call and returns its result unchanged.
+ */
+struct crypto_ahash *wcnss_wlan_crypto_alloc_ahash(const char *alg_name,
+							 u32 type, u32 mask)
+{
+	return crypto_alloc_ahash(alg_name, type, mask);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_alloc_ahash);
+
+int wcnss_wlan_crypto_ahash_digest(struct ahash_request *req)
+{
+	return crypto_ahash_digest(req);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_ahash_digest);
+
+void wcnss_wlan_crypto_free_ahash(struct crypto_ahash *tfm)
+{
+	crypto_free_ahash(tfm);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_free_ahash);
+
+int wcnss_wlan_crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	return crypto_ahash_setkey(tfm, key, keylen);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_ahash_setkey);
+
+struct crypto_ablkcipher *
+wcnss_wlan_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_alloc_ablkcipher(alg_name, type, mask);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_alloc_ablkcipher);
+
+void wcnss_wlan_ablkcipher_request_free(struct ablkcipher_request *req)
+{
+	ablkcipher_request_free(req);
+}
+EXPORT_SYMBOL(wcnss_wlan_ablkcipher_request_free);
+
+void wcnss_wlan_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
+{
+	crypto_free_ablkcipher(tfm);
+}
+EXPORT_SYMBOL(wcnss_wlan_crypto_free_ablkcipher);
+
diff --git a/drivers/net/wireless/wcnss/wcnss_riva.c b/drivers/net/wireless/wcnss/wcnss_riva.c
new file mode 100644
index 0000000..3617ba8
--- /dev/null
+++ b/drivers/net/wireless/wcnss/wcnss_riva.c
@@ -0,0 +1,314 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/pm8xxx/pm8921.h>
+#include <linux/mfd/pm8xxx/gpio.h>
+#include <mach/msm_xo.h>
+#include <mach/msm_iomap.h>
+
+#include "wcnss_riva.h"
+
+static void __iomem *msm_riva_base;
+static struct msm_xo_voter *wlan_clock;
+static const char *id = "WLAN";
+
+#define MSM_RIVA_PHYS                     0x03204000
+#define RIVA_PMU_CFG                      (msm_riva_base + 0x28)
+#define RIVA_PMU_CFG_IRIS_XO_CFG          BIT(3)
+#define RIVA_PMU_CFG_IRIS_XO_EN           BIT(4)
+#define RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP   BIT(5)
+#define RIVA_PMU_CFG_IRIS_XO_CFG_STS      BIT(6) /* 1: in progress, 0: done */
+
+#define RIVA_PMU_CFG_IRIS_XO_MODE         0x6
+#define RIVA_PMU_CFG_IRIS_XO_MODE_48      (3 << 1)
+
+#define VREG_NULL_CONFIG            0x0000
+#define VREG_GET_REGULATOR_MASK     0x0001
+#define VREG_SET_VOLTAGE_MASK       0x0002
+#define VREG_OPTIMUM_MODE_MASK      0x0004
+#define VREG_ENABLE_MASK            0x0008
+
+/* Per-regulator bookkeeping for the WCNSS power sequence.  "state" is a
+ * bitmask of the VREG_*_MASK steps completed so far, so a failed bring-up
+ * can be unwound precisely by wcnss_vregs_off().
+ */
+struct vregs_info {
+	const char * const name;	/* regulator consumer supply name */
+	int state;			/* VREG_*_MASK progress bitmask */
+	const int nominal_min;		/* uV; 0 for switches (set_voltage skipped) */
+	const int low_power_min;	/* uV floor applied when turning off */
+	const int max_voltage;		/* uV ceiling */
+	const int uA_load;		/* optimum-mode load; 0 = no vote */
+	struct regulator *regulator;	/* handle acquired in wcnss_vregs_on() */
+};
+
+/* IRIS (RF companion) supplies, in bring-up order */
+static struct vregs_info iris_vregs[] = {
+	{"iris_vddio",  VREG_NULL_CONFIG, 0000000, 0, 0000000, 0,      NULL},
+	{"iris_vddxo",  VREG_NULL_CONFIG, 1800000, 0, 1800000, 10000,  NULL},
+	{"iris_vddrfa", VREG_NULL_CONFIG, 1300000, 0, 1300000, 100000, NULL},
+	{"iris_vddpa",  VREG_NULL_CONFIG, 2900000, 0, 2900000, 515000, NULL},
+	{"iris_vdddig", VREG_NULL_CONFIG, 0000000, 0, 0000000, 0,      NULL},
+};
+
+/* Riva (WLAN subsystem) supplies, in bring-up order */
+static struct vregs_info riva_vregs[] = {
+	{"riva_vddmx",  VREG_NULL_CONFIG, 1050000, 0, 1150000, 0,      NULL},
+	{"riva_vddcx",  VREG_NULL_CONFIG, 1050000, 0, 1150000, 0,      NULL},
+	{"riva_vddpx",  VREG_NULL_CONFIG, 1800000, 0, 1800000, 0,      NULL},
+};
+
+/*
+ * configure_iris_xo() - program the IRIS crystal-oscillator configuration
+ * @use_48mhz_xo: true when a 48 MHz XO feeds IRIS, false for 19.2 MHz TCXO
+ * @on: WCNSS_WLAN_SWITCH_ON / WCNSS_WLAN_SWITCH_OFF
+ *
+ * On power-on, maps the Riva PMU registers, selects the XO mode, pulses
+ * the configuration strobe and waits for completion; in the 19.2 MHz case
+ * it additionally votes the MSM TCXO A0 buffer on.  On power-off only the
+ * TCXO vote is dropped.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int configure_iris_xo(bool use_48mhz_xo, int on)
+{
+	u32 reg = 0;
+	int rc = 0;
+
+	if (on) {
+		msm_riva_base = ioremap(MSM_RIVA_PHYS, SZ_256);
+		if (!msm_riva_base) {
+			pr_err("ioremap MSM_RIVA_PHYS failed\n");
+			/* previous code fell through with rc == 0 and
+			 * reported success despite the failure
+			 */
+			rc = -ENOMEM;
+			goto fail;
+		}
+
+		/* Enable IRIS XO */
+		writel_relaxed(0, RIVA_PMU_CFG);
+		reg = readl_relaxed(RIVA_PMU_CFG);
+		reg |= RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+				RIVA_PMU_CFG_IRIS_XO_EN;
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		/* Clear XO_MODE[b2:b1] bits. Clear implies 19.2 MHz TCXO */
+		reg &= ~(RIVA_PMU_CFG_IRIS_XO_MODE);
+
+		if (use_48mhz_xo)
+			reg |= RIVA_PMU_CFG_IRIS_XO_MODE_48;
+
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		/* Start IRIS XO configuration */
+		reg |= RIVA_PMU_CFG_IRIS_XO_CFG;
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		/* Wait for XO configuration to finish.
+		 * NOTE(review): unbounded busy-wait on a hardware status
+		 * bit - consider adding a timeout.
+		 */
+		while (readl_relaxed(RIVA_PMU_CFG) &
+						RIVA_PMU_CFG_IRIS_XO_CFG_STS)
+			cpu_relax();
+
+		/* Stop IRIS XO configuration */
+		reg &= ~(RIVA_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+				RIVA_PMU_CFG_IRIS_XO_CFG);
+		writel_relaxed(reg, RIVA_PMU_CFG);
+
+		if (!use_48mhz_xo) {
+			wlan_clock = msm_xo_get(MSM_XO_TCXO_A0, id);
+			if (IS_ERR(wlan_clock)) {
+				rc = PTR_ERR(wlan_clock);
+				pr_err("Failed to get MSM_XO_TCXO_A0 voter"
+							" (%d)\n", rc);
+				/* don't leave an ERR_PTR in the global -
+				 * the off-path only checks for NULL
+				 */
+				wlan_clock = NULL;
+				goto fail;
+			}
+
+			rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_ON);
+			if (rc < 0) {
+				pr_err("Configuring MSM_XO_MODE_ON failed"
+							" (%d)\n", rc);
+				goto msm_xo_vote_fail;
+			}
+		}
+	}  else {
+		if (wlan_clock != NULL && !use_48mhz_xo) {
+			rc = msm_xo_mode_vote(wlan_clock, MSM_XO_MODE_OFF);
+			if (rc < 0)
+				pr_err("Configuring MSM_XO_MODE_OFF failed"
+							" (%d)\n", rc);
+		}
+	}
+
+	/* Add some delay for XO to settle */
+	msleep(20);
+
+	return rc;
+
+msm_xo_vote_fail:
+	msm_xo_put(wlan_clock);
+	/* voter released - clear so a later power-on re-acquires it */
+	wlan_clock = NULL;
+
+fail:
+	return rc;
+}
+
+/* Helper routine to turn off all WCNSS vregs e.g. IRIS, Riva.
+ *
+ * Walks @regulators in reverse and undoes exactly the steps recorded in
+ * each entry's state bitmask (optimum-mode vote, voltage, enable, get),
+ * then clears the state so the table can be reused.  Errors are logged
+ * but never abort the teardown.
+ */
+static void wcnss_vregs_off(struct vregs_info regulators[], uint size)
+{
+	int i, rc = 0;
+
+	/* Regulators need to be turned off in the reverse order */
+	for (i = (size-1); i >= 0; i--) {
+		/* nothing was done for this entry - skip it */
+		if (regulators[i].state == VREG_NULL_CONFIG)
+			continue;
+
+		/* Remove PWM mode */
+		if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
+			rc = regulator_set_optimum_mode(
+					regulators[i].regulator, 0);
+			if (rc < 0)
+				pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
+						regulators[i].name, rc);
+		}
+
+		/* Set voltage to lowest level */
+		if (regulators[i].state & VREG_SET_VOLTAGE_MASK) {
+			rc = regulator_set_voltage(regulators[i].regulator,
+					regulators[i].low_power_min,
+					regulators[i].max_voltage);
+			if (rc)
+				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+						regulators[i].name, rc);
+		}
+
+		/* Disable regulator */
+		if (regulators[i].state & VREG_ENABLE_MASK) {
+			rc = regulator_disable(regulators[i].regulator);
+			if (rc < 0)
+				pr_err("vreg %s disable failed (%d)\n",
+						regulators[i].name, rc);
+		}
+
+		/* Free the regulator source */
+		if (regulators[i].state & VREG_GET_REGULATOR_MASK)
+			regulator_put(regulators[i].regulator);
+
+		regulators[i].state = VREG_NULL_CONFIG;
+	}
+}
+
+/* Common helper routine to turn on all WCNSS vregs e.g. IRIS, Riva.
+ *
+ * For each entry: get the regulator, set its nominal voltage (skipped for
+ * switches), vote an optimum-mode load if requested, and enable it.  Each
+ * completed step is recorded in the entry's state bitmask so a failure is
+ * unwound exactly by wcnss_vregs_off().
+ *
+ * Returns 0 on success or a negative errno (table already unwound).
+ */
+static int wcnss_vregs_on(struct device *dev,
+		struct vregs_info regulators[], uint size)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < size; i++) {
+		/* Get regulator source */
+		regulators[i].regulator =
+			regulator_get(dev, regulators[i].name);
+		if (IS_ERR(regulators[i].regulator)) {
+			rc = PTR_ERR(regulators[i].regulator);
+			pr_err("regulator get of %s failed (%d)\n",
+				regulators[i].name, rc);
+			goto fail;
+		}
+		regulators[i].state |= VREG_GET_REGULATOR_MASK;
+
+		/* Set voltage to nominal. Exclude switches e.g. LVS */
+		if (regulators[i].nominal_min || regulators[i].max_voltage) {
+			rc = regulator_set_voltage(regulators[i].regulator,
+					regulators[i].nominal_min,
+					regulators[i].max_voltage);
+			if (rc) {
+				pr_err("regulator_set_voltage(%s) failed (%d)\n",
+						regulators[i].name, rc);
+				goto fail;
+			}
+			regulators[i].state |= VREG_SET_VOLTAGE_MASK;
+		}
+
+		/* Vote for PWM/PFM mode if needed */
+		if (regulators[i].uA_load) {
+			rc = regulator_set_optimum_mode(regulators[i].regulator,
+					regulators[i].uA_load);
+			if (rc < 0) {
+				pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
+						regulators[i].name, rc);
+				goto fail;
+			}
+			regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
+		}
+
+		/* Enable the regulator */
+		rc = regulator_enable(regulators[i].regulator);
+		if (rc) {
+			pr_err("vreg %s enable failed (%d)\n",
+				regulators[i].name, rc);
+			goto fail;
+		}
+		regulators[i].state |= VREG_ENABLE_MASK;
+	}
+
+	return rc;
+
+fail:
+	wcnss_vregs_off(regulators, size);
+	return rc;
+}
+
+/* Convenience wrappers: power the IRIS / Riva supply groups on or off */
+static void wcnss_iris_vregs_off(void)
+{
+	wcnss_vregs_off(iris_vregs, ARRAY_SIZE(iris_vregs));
+}
+
+static int wcnss_iris_vregs_on(struct device *dev)
+{
+	return wcnss_vregs_on(dev, iris_vregs, ARRAY_SIZE(iris_vregs));
+}
+
+static void wcnss_riva_vregs_off(void)
+{
+	wcnss_vregs_off(riva_vregs, ARRAY_SIZE(riva_vregs));
+}
+
+static int wcnss_riva_vregs_on(struct device *dev)
+{
+	return wcnss_vregs_on(dev, riva_vregs, ARRAY_SIZE(riva_vregs));
+}
+
+/*
+ * wcnss_wlan_power() - master WLAN power switch
+ * @dev: device used to look up the regulator supplies
+ * @cfg: board configuration (48 MHz XO vs 19.2 MHz TCXO)
+ * @on: WCNSS_WLAN_SWITCH_ON / WCNSS_WLAN_SWITCH_OFF
+ *
+ * Power-on order: Riva rails, then IRIS rails, then the IRIS XO; a
+ * failure at any step unwinds the earlier ones.  Power-off reverses the
+ * order.  Returns 0 on success or a negative errno.
+ */
+int wcnss_wlan_power(struct device *dev,
+		struct wcnss_wlan_config *cfg,
+		enum wcnss_opcode on)
+{
+	int rc = 0;
+
+	if (on) {
+		/* RIVA regulator settings */
+		rc = wcnss_riva_vregs_on(dev);
+		if (rc)
+			goto fail_riva_on;
+
+		/* IRIS regulator settings */
+		rc = wcnss_iris_vregs_on(dev);
+		if (rc)
+			goto fail_iris_on;
+
+		/* Configure IRIS XO */
+		rc = configure_iris_xo(cfg->use_48mhz_xo, WCNSS_WLAN_SWITCH_ON);
+		if (rc)
+			goto fail_iris_xo;
+
+	} else {
+		configure_iris_xo(cfg->use_48mhz_xo, WCNSS_WLAN_SWITCH_OFF);
+		wcnss_iris_vregs_off();
+		wcnss_riva_vregs_off();
+	}
+
+	return rc;
+
+fail_iris_xo:
+	wcnss_iris_vregs_off();
+
+fail_iris_on:
+	wcnss_riva_vregs_off();
+
+fail_riva_on:
+	return rc;
+}
+EXPORT_SYMBOL(wcnss_wlan_power);
+
diff --git a/drivers/net/wireless/wcnss/wcnss_riva.h b/drivers/net/wireless/wcnss/wcnss_riva.h
new file mode 100644
index 0000000..e037f58
--- /dev/null
+++ b/drivers/net/wireless/wcnss/wcnss_riva.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _WCNSS_RIVA_H_
+#define _WCNSS_RIVA_H_
+
+#include <linux/device.h>
+
+/* Power switch positions passed to wcnss_wlan_power() */
+enum wcnss_opcode {
+	WCNSS_WLAN_SWITCH_OFF = 0,
+	WCNSS_WLAN_SWITCH_ON,
+};
+
+/* Board/NV-derived configuration consumed by the power sequence */
+struct wcnss_wlan_config {
+	int		use_48mhz_xo;	/* 1: 48 MHz XO populated, 0: 19.2 MHz TCXO */
+};
+
+/* Switch WLAN power per @cfg; returns 0 on success or a negative errno */
+int wcnss_wlan_power(struct device *dev,
+		struct wcnss_wlan_config *cfg,
+		enum wcnss_opcode opcode);
+
+#endif /* _WCNSS_RIVA_H_ */
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
new file mode 100644
index 0000000..371e58e
--- /dev/null
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -0,0 +1,339 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/parser.h>
+#include <linux/wcnss_wlan.h>
+#include <mach/peripheral-loader.h>
+#include "wcnss_riva.h"
+
+#define DEVICE "wcnss_wlan"
+#define VERSION "1.01"
+#define WCNSS_PIL_DEVICE "wcnss"
+#define WCNSS_NV_NAME "wlan/prima/WCNSS_qcom_cfg.ini"
+
+/* By default assume 48MHz XO is populated */
+#define CONFIG_USE_48MHZ_XO_DEFAULT 1
+
+/*
+ * Driver-private state.  Allocated once in wcnss_wlan_probe() and freed
+ * on probe failure or in wcnss_wlan_exit(); NULL until probe runs.
+ */
+static struct {
+	struct platform_device *pdev;
+	void		*pil;		/* handle returned by pil_get() */
+	struct resource	*mmio_res;
+	struct resource	*tx_irq_res;
+	struct resource	*rx_irq_res;
+	const struct dev_pm_ops *pm_ops;	/* registered by the WLAN driver */
+	int             smd_channel_ready;	/* nonzero once SMD ctrl channel is up */
+	struct wcnss_wlan_config wlan_config;
+} *penv = NULL;
+
+/* Token ids for the settings recognized in the NV configuration file */
+enum {
+	nv_none = -1,	/* unknown setting; silently ignored */
+	nv_use_48mhz_xo,
+	nv_end,		/* "END" marker terminates parsing */
+};
+
+/* match_token() patterns mapping NV file entries to the ids above */
+static const match_table_t nv_tokens = {
+	{nv_use_48mhz_xo, "gUse48MHzXO=%d"},
+	{nv_end, "END"},
+	{nv_none, NULL}
+};
+
+/* Apply the compile-time defaults to the WCNSS configuration */
+static void wcnss_init_config(void)
+{
+	penv->wlan_config.use_48mhz_xo = CONFIG_USE_48MHZ_XO_DEFAULT;
+}
+
+/*
+ * Parse a NUL-terminated, writable copy of the NV configuration file and
+ * update penv->wlan_config with any recognized settings.  The buffer is
+ * modified in place (strsep() writes NULs over the separators), so the
+ * caller must pass a private copy.  Unknown settings are silently
+ * ignored; parsing stops at an "END" token or at end of buffer.
+ */
+static void wcnss_parse_nv(char *nvp)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *cur;
+	char *tok;
+	int token;
+	int intval;
+
+	cur = nvp;
+	while (cur != NULL) {
+		if ('#' == *cur) {
+			/* comment, consume remainder of line */
+			tok = strsep(&cur, "\r\n");
+			continue;
+		}
+
+		tok = strsep(&cur, " \t\r\n,");
+		if (!*tok)
+			/* empty token from consecutive separators */
+			continue;
+
+		token = match_token(tok, nv_tokens, args);
+		switch (token) {
+		case nv_use_48mhz_xo:
+			if (match_int(&args[0], &intval)) {
+				dev_err(&penv->pdev->dev,
+					"Invalid value for gUse48MHzXO: %s\n",
+					args[0].from);
+				continue;
+			}
+			/* only boolean 0/1 values are accepted */
+			if ((0 > intval) || (1 < intval)) {
+				dev_err(&penv->pdev->dev,
+					"Invalid value for gUse48MHzXO: %d\n",
+					intval);
+				continue;
+			}
+			penv->wlan_config.use_48mhz_xo = intval;
+			dev_info(&penv->pdev->dev,
+					"gUse48MHzXO set to %d\n", intval);
+			break;
+		case nv_end:
+			/* end of options so we are done */
+			return;
+		default:
+			/* silently ignore unknown settings */
+			break;
+		}
+	}
+}
+
+/*
+ * Called when the SMD "WLAN_CTRL" platform device appears, i.e. when the
+ * SMD control channel to the WCNSS comes up.  Marks the device ready so
+ * the accessor functions below start returning resources.
+ */
+static int __devinit
+wcnss_wlan_ctrl_probe(struct platform_device *pdev)
+{
+	if (penv)
+		penv->smd_channel_ready = 1;
+
+	pr_info("%s: SMD ctrl channel up\n", __func__);
+
+	return 0;
+}
+
+/* Counterpart of the above: the SMD control channel went down */
+static int __devexit
+wcnss_wlan_ctrl_remove(struct platform_device *pdev)
+{
+	if (penv)
+		penv->smd_channel_ready = 0;
+
+	pr_info("%s: SMD ctrl channel down\n", __func__);
+
+	return 0;
+}
+
+
+/* Binds to the "WLAN_CTRL" device created by the SMD transport */
+static struct platform_driver wcnss_wlan_ctrl_driver = {
+	.driver = {
+		.name	= "WLAN_CTRL",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= wcnss_wlan_ctrl_probe,
+	.remove	= __devexit_p(wcnss_wlan_ctrl_remove),
+};
+
+/*
+ * Return the wcnss_wlan device once it has been probed and the SMD
+ * control channel is up; NULL otherwise.
+ */
+struct device *wcnss_wlan_get_device(void)
+{
+	if (penv && penv->pdev && penv->smd_channel_ready)
+		return &penv->pdev->dev;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_device);
+
+/*
+ * Return the WCNSS MMIO resource for the given device, or NULL if the
+ * device is not ours or the SMD channel is not yet ready.
+ */
+struct resource *wcnss_wlan_get_memory_map(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && penv->smd_channel_ready)
+		return penv->mmio_res;
+	return NULL;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_memory_map);
+
+/* Return the DXE TX IRQ number, or WCNSS_WLAN_IRQ_INVALID if unavailable */
+int wcnss_wlan_get_dxe_tx_irq(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+				penv->tx_irq_res && penv->smd_channel_ready)
+		return penv->tx_irq_res->start;
+	return WCNSS_WLAN_IRQ_INVALID;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_tx_irq);
+
+/* Return the DXE RX IRQ number, or WCNSS_WLAN_IRQ_INVALID if unavailable */
+int wcnss_wlan_get_dxe_rx_irq(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+				penv->rx_irq_res && penv->smd_channel_ready)
+		return penv->rx_irq_res->start;
+	return WCNSS_WLAN_IRQ_INVALID;
+}
+EXPORT_SYMBOL(wcnss_wlan_get_dxe_rx_irq);
+
+/*
+ * Let the WLAN driver register its PM callbacks; they are forwarded by
+ * wcnss_wlan_suspend()/wcnss_wlan_resume() below.
+ */
+void wcnss_wlan_register_pm_ops(struct device *dev,
+				const struct dev_pm_ops *pm_ops)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) && pm_ops)
+		penv->pm_ops = pm_ops;
+}
+EXPORT_SYMBOL(wcnss_wlan_register_pm_ops);
+
+/*
+ * Forward a system suspend to the registered WLAN driver callback.
+ * A no-op (returns 0) until the channel is ready and ops are registered.
+ */
+static int wcnss_wlan_suspend(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->suspend)
+		return penv->pm_ops->suspend(dev);
+	return 0;
+}
+
+/* Forward a system resume to the registered WLAN driver callback */
+static int wcnss_wlan_resume(struct device *dev)
+{
+	if (penv && dev && (dev == &penv->pdev->dev) &&
+	    penv->smd_channel_ready &&
+	    penv->pm_ops && penv->pm_ops->resume)
+		return penv->pm_ops->resume(dev);
+	return 0;
+}
+
+/*
+ * Probe the wcnss_wlan platform device: allocate the driver state, load
+ * the optional NV configuration, power the WCNSS up, boot it through the
+ * peripheral loader and look up the MMIO/IRQ resources later handed out
+ * by the accessor functions.  On any failure, undoes the steps already
+ * taken (goto-cleanup) and leaves penv NULL.
+ */
+static int __devinit
+wcnss_wlan_probe(struct platform_device *pdev)
+{
+	const struct firmware *nv;
+	char *nvp;
+	int ret;
+
+	/* verify we haven't been called more than once */
+	if (penv) {
+		dev_err(&pdev->dev, "cannot handle multiple devices.\n");
+		return -ENODEV;
+	}
+
+	/* create an environment to track the device */
+	penv = kzalloc(sizeof(*penv), GFP_KERNEL);
+	if (!penv) {
+		dev_err(&pdev->dev, "cannot allocate device memory.\n");
+		return -ENOMEM;
+	}
+	penv->pdev = pdev;
+
+	/* initialize the WCNSS default configuration */
+	wcnss_init_config();
+
+	/* update the WCNSS configuration from NV if present;
+	 * a missing or unparsable NV file is not fatal */
+	ret = request_firmware(&nv, WCNSS_NV_NAME, &pdev->dev);
+	if (!ret) {
+		/* firmware is read-only so make a NUL-terminated copy */
+		nvp = kmalloc(nv->size+1, GFP_KERNEL);
+		if (nvp) {
+			memcpy(nvp, nv->data, nv->size);
+			nvp[nv->size] = '\0';
+			wcnss_parse_nv(nvp);
+			kfree(nvp);
+		} else {
+			dev_err(&pdev->dev, "cannot parse NV.\n");
+		}
+		release_firmware(nv);
+	} else {
+		dev_err(&pdev->dev, "cannot read NV.\n");
+	}
+
+	/* power up the WCNSS */
+	ret = wcnss_wlan_power(&pdev->dev, &penv->wlan_config,
+					WCNSS_WLAN_SWITCH_ON);
+	if (ret) {
+		dev_err(&pdev->dev, "WCNSS Power-up failed.\n");
+		goto fail_power;
+	}
+
+	/* trigger initialization of the WCNSS */
+	penv->pil = pil_get(WCNSS_PIL_DEVICE);
+	if (IS_ERR(penv->pil)) {
+		dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+		ret = PTR_ERR(penv->pil);
+		penv->pil = NULL;
+		goto fail_pil;
+	}
+
+	/* allocate resources */
+	penv->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"wcnss_mmio");
+	penv->tx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlantx_irq");
+	penv->rx_irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							"wcnss_wlanrx_irq");
+
+	if (!(penv->mmio_res && penv->tx_irq_res && penv->rx_irq_res)) {
+		dev_err(&pdev->dev, "insufficient resources\n");
+		ret = -ENOENT;
+		goto fail_res;
+	}
+
+	return 0;
+
+	/* unwind in reverse order of acquisition */
+fail_res:
+	if (penv->pil)
+		pil_put(penv->pil);
+fail_pil:
+	wcnss_wlan_power(&pdev->dev, &penv->wlan_config,
+				WCNSS_WLAN_SWITCH_OFF);
+fail_power:
+	kfree(penv);
+	penv = NULL;
+	return ret;
+}
+
+/*
+ * Intentionally empty: the state allocated in probe() is torn down in
+ * wcnss_wlan_exit() rather than here.
+ */
+static int __devexit
+wcnss_wlan_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+
+/* Forwards system PM events to the callbacks registered by the WLAN driver */
+static const struct dev_pm_ops wcnss_wlan_pm_ops = {
+	.suspend	= wcnss_wlan_suspend,
+	.resume		= wcnss_wlan_resume,
+};
+
+static struct platform_driver wcnss_wlan_driver = {
+	.driver = {
+		.name	= DEVICE,
+		.owner	= THIS_MODULE,
+		.pm	= &wcnss_wlan_pm_ops,
+	},
+	.probe	= wcnss_wlan_probe,
+	.remove	= __devexit_p(wcnss_wlan_remove),
+};
+
+/* Register both platform drivers (main device and SMD ctrl channel) */
+static int __init wcnss_wlan_init(void)
+{
+	/* NOTE(review): the platform_driver_register() return values are
+	 * ignored, so init always reports success — confirm intended */
+	platform_driver_register(&wcnss_wlan_driver);
+	platform_driver_register(&wcnss_wlan_ctrl_driver);
+
+	return 0;
+}
+
+/*
+ * Module teardown: release the PIL handle, power the WCNSS off and free
+ * the state allocated in probe, then unregister both drivers in reverse
+ * registration order.
+ */
+static void __exit wcnss_wlan_exit(void)
+{
+	if (penv) {
+		if (penv->pil)
+			pil_put(penv->pil);
+
+		wcnss_wlan_power(&penv->pdev->dev, &penv->wlan_config,
+					WCNSS_WLAN_SWITCH_OFF);
+
+		kfree(penv);
+		penv = NULL;
+	}
+
+	platform_driver_unregister(&wcnss_wlan_ctrl_driver);
+	platform_driver_unregister(&wcnss_wlan_driver);
+}
+
+module_init(wcnss_wlan_init);
+module_exit(wcnss_wlan_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION(DEVICE "Driver");