msm: bam_dmux: add loopback support

Bam dmux is currently difficult to isolate, which makes it hard to measure
common metrics such as throughput and latency.

Add hooks into bam_dmux to allow for a separate, dynamically loadable
loopback module. This allows bam dmux to be isolated and provides more
accurate measurements of the above metrics.
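
As a hypothetical illustration (the actual loopback module is not part of
this patch), a loadable test module could swap in its own ops table along
the lines below; the stub and symbol names here are examples only:

  #include <linux/module.h>
  #include "bam_dmux_private.h"

  /* Example stub: a real loopback implementation would bounce tx data
   * back to the rx path instead of touching the A2 hardware.
   */
  static int loopback_sps_connect(struct sps_pipe *h,
				struct sps_connect *connect)
  {
	return 0;
  }

  static struct bam_ops_if loopback_ops = {
	.sps_connect_ptr = loopback_sps_connect,
	/* ... the remaining smsm/sps hooks and the dma_to/dma_from
	 * directions must also be provided by the loopback module ...
	 */
  };

  static int __init bam_dmux_loopback_init(void)
  {
	/* quiesce bam dmux, install the loopback ops, bring it back up */
	msm_bam_dmux_deinit();
	msm_bam_dmux_set_bam_ops(&loopback_ops);
	msm_bam_dmux_reinit();
	return 0;
  }
  module_init(bam_dmux_loopback_init);

  static void __exit bam_dmux_loopback_exit(void)
  {
	/* restore the default (hardware) ops on unload */
	msm_bam_dmux_deinit();
	msm_bam_dmux_set_bam_ops(NULL);
	msm_bam_dmux_reinit();
  }
  module_exit(bam_dmux_loopback_exit);

  MODULE_LICENSE("GPL v2");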

Change-Id: I174d900dea73cca27d32a54a908f43728059be30
Signed-off-by: Brent Hronik <bhronik@codeaurora.org>
Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index cc55534..83a1290 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -36,19 +36,12 @@
 #include <mach/socinfo.h>
 #include <mach/subsystem_restart.h>
 
+#include "bam_dmux_private.h"
+
 #define BAM_CH_LOCAL_OPEN       0x1
 #define BAM_CH_REMOTE_OPEN      0x2
 #define BAM_CH_IN_RESET         0x4
 
-#define BAM_MUX_HDR_MAGIC_NO    0x33fc
-
-#define BAM_MUX_HDR_CMD_DATA		0
-#define BAM_MUX_HDR_CMD_OPEN		1
-#define BAM_MUX_HDR_CMD_CLOSE		2
-#define BAM_MUX_HDR_CMD_STATUS		3 /* unused */
-#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC	4
-
-
 #define LOW_WATERMARK		2
 #define HIGH_WATERMARK		4
 #define DEFAULT_POLLING_MIN_SLEEP (950)
@@ -72,6 +65,33 @@
 			bam_adaptive_timer_enabled,
 		   int, S_IRUGO | S_IWUSR | S_IWGRP);
 
+static struct bam_ops_if bam_default_ops = {
+	/* smsm */
+	.smsm_change_state_ptr = &smsm_change_state,
+	.smsm_get_state_ptr = &smsm_get_state,
+	.smsm_state_cb_register_ptr = &smsm_state_cb_register,
+	.smsm_state_cb_deregister_ptr = &smsm_state_cb_deregister,
+
+	/* sps */
+	.sps_connect_ptr = &sps_connect,
+	.sps_disconnect_ptr = &sps_disconnect,
+	.sps_register_bam_device_ptr = &sps_register_bam_device,
+	.sps_deregister_bam_device_ptr = &sps_deregister_bam_device,
+	.sps_alloc_endpoint_ptr = &sps_alloc_endpoint,
+	.sps_free_endpoint_ptr = &sps_free_endpoint,
+	.sps_set_config_ptr = &sps_set_config,
+	.sps_get_config_ptr = &sps_get_config,
+	.sps_device_reset_ptr = &sps_device_reset,
+	.sps_register_event_ptr = &sps_register_event,
+	.sps_transfer_one_ptr = &sps_transfer_one,
+	.sps_get_iovec_ptr = &sps_get_iovec,
+	.sps_get_unused_desc_num_ptr = &sps_get_unused_desc_num,
+
+	.dma_to = DMA_TO_DEVICE,
+	.dma_from = DMA_FROM_DEVICE,
+};
+static struct bam_ops_if *bam_ops = &bam_default_ops;
+
 #if defined(DEBUG)
 static uint32_t bam_dmux_read_cnt;
 static uint32_t bam_dmux_write_cnt;
@@ -151,30 +171,11 @@
 	int use_wm;
 };
 
-struct tx_pkt_info {
-	struct sk_buff *skb;
-	dma_addr_t dma_address;
-	char is_cmd;
-	uint32_t len;
-	struct work_struct work;
-	struct list_head list_node;
-	unsigned ts_sec;
-	unsigned long ts_nsec;
-};
-
-struct rx_pkt_info {
-	struct sk_buff *skb;
-	dma_addr_t dma_address;
-	struct work_struct work;
-	struct list_head list_node;
-};
-
 #define A2_NUM_PIPES		6
 #define A2_SUMMING_THRESHOLD	4096
 #define A2_DEFAULT_DESCRIPTORS	32
 #define A2_PHYS_BASE		0x124C2000
 #define A2_PHYS_SIZE		0x2000
-#define BUFFER_SIZE		2048
 #define NUM_BUFFERS		32
 
 #ifndef A2_BAM_IRQ
@@ -209,15 +210,6 @@
 static DEFINE_SPINLOCK(bam_tx_pool_spinlock);
 static DEFINE_MUTEX(bam_pdev_mutexlock);
 
-struct bam_mux_hdr {
-	uint16_t magic_num;
-	uint8_t reserved;
-	uint8_t cmd;
-	uint8_t pad_len;
-	uint8_t ch_id;
-	uint16_t pkt_len;
-};
-
 static void notify_all(int event, unsigned long data);
 static void bam_mux_write_done(struct work_struct *work);
 static void handle_bam_mux_cmd(struct work_struct *work);
@@ -426,7 +418,7 @@
 		ptr = skb_put(info->skb, BUFFER_SIZE);
 
 		info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
-							DMA_FROM_DEVICE);
+							bam_ops->dma_from);
 		if (info->dma_address == 0 || info->dma_address == ~0) {
 			DMUX_LOG_KERR("%s: dma_map_single failure %p for %p\n",
 				__func__, (void *)info->dma_address, ptr);
@@ -436,8 +428,8 @@
 		mutex_lock(&bam_rx_pool_mutexlock);
 		list_add_tail(&info->list_node, &bam_rx_pool);
 		rx_len_cached = ++bam_rx_pool_len;
-		ret = sps_transfer_one(bam_rx_pipe, info->dma_address,
-			BUFFER_SIZE, info, 0);
+		ret = bam_ops->sps_transfer_one_ptr(bam_rx_pipe,
+				info->dma_address, BUFFER_SIZE, info, 0);
 		if (ret) {
 			list_del(&info->list_node);
 			rx_len_cached = --bam_rx_pool_len;
@@ -446,7 +438,7 @@
 				__func__, ret);
 
 			dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
-						DMA_FROM_DEVICE);
+						bam_ops->dma_from);
 
 			goto fail_skb;
 		}
@@ -534,7 +526,8 @@
 
 	info = container_of(work, struct rx_pkt_info, work);
 	rx_skb = info->skb;
-	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE, DMA_FROM_DEVICE);
+	dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
+			bam_ops->dma_from);
 	kfree(info);
 
 	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
@@ -643,7 +636,7 @@
 	}
 
 	dma_address = dma_map_single(NULL, data, len,
-					DMA_TO_DEVICE);
+					bam_ops->dma_to);
 	if (!dma_address) {
 		pr_err("%s: dma_map_single() failed\n", __func__);
 		kfree(pkt);
@@ -658,7 +651,7 @@
 	INIT_WORK(&pkt->work, bam_mux_write_done);
 	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
 	list_add_tail(&pkt->list_node, &bam_tx_pool);
-	rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
+	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, len,
 				pkt, SPS_IOVEC_FLAG_EOT);
 	if (rc) {
 		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
@@ -668,7 +661,7 @@
 		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
 		dma_unmap_single(NULL, pkt->dma_address,
 					pkt->len,
-					DMA_TO_DEVICE);
+					bam_ops->dma_to);
 		kfree(pkt);
 	} else {
 		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
@@ -819,7 +812,7 @@
 	}
 
 	dma_address = dma_map_single(NULL, skb->data, skb->len,
-					DMA_TO_DEVICE);
+					bam_ops->dma_to);
 	if (!dma_address) {
 		pr_err("%s: dma_map_single() failed\n", __func__);
 		goto write_fail3;
@@ -831,7 +824,7 @@
 	INIT_WORK(&pkt->work, bam_mux_write_done);
 	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
 	list_add_tail(&pkt->list_node, &bam_tx_pool);
-	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
+	rc = bam_ops->sps_transfer_one_ptr(bam_tx_pipe, dma_address, skb->len,
 				pkt, SPS_IOVEC_FLAG_EOT);
 	if (rc) {
 		DMUX_LOG_KERR("%s sps_transfer_one failed rc=%d\n",
@@ -840,7 +833,7 @@
 		DBG_INC_TX_SPS_FAILURE_CNT();
 		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
 		dma_unmap_single(NULL, pkt->dma_address,
-					pkt->skb->len,	DMA_TO_DEVICE);
+					pkt->skb->len,	bam_ops->dma_to);
 		kfree(pkt);
 		if (new_skb)
 			dev_kfree_skb_any(new_skb);
@@ -1048,14 +1041,14 @@
 	 * Attempt to enable interrupts - if this fails,
 	 * continue polling and we will retry later.
 	 */
-	ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
+	ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &cur_rx_conn);
 	if (ret) {
 		pr_err("%s: sps_get_config() failed %d\n", __func__, ret);
 		goto fail;
 	}
 
 	rx_register_event.options = SPS_O_EOT;
-	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
+	ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event);
 	if (ret) {
 		pr_err("%s: sps_register_event() failed %d\n", __func__, ret);
 		goto fail;
@@ -1063,7 +1056,7 @@
 
 	cur_rx_conn.options = SPS_O_AUTO_ENABLE |
 		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
-	ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
+	ret = bam_ops->sps_set_config_ptr(bam_rx_pipe, &cur_rx_conn);
 	if (ret) {
 		pr_err("%s: sps_set_config() failed %d\n", __func__, ret);
 		goto fail;
@@ -1074,7 +1067,7 @@
 
 	/* handle any rx packets before interrupt was enabled */
 	while (bam_connection_is_active && !polling_mode) {
-		ret = sps_get_iovec(bam_rx_pipe, &iov);
+		ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov);
 		if (ret) {
 			pr_err("%s: sps_get_iovec failed %d\n",
 					__func__, ret);
@@ -1159,7 +1152,7 @@
 				return;
 			}
 
-			ret = sps_get_iovec(bam_rx_pipe, &iov);
+			ret = bam_ops->sps_get_iovec_ptr(bam_rx_pipe, &iov);
 			if (ret) {
 				DMUX_LOG_KERR("%s: sps_get_iovec failed %d\n",
 						__func__, ret);
@@ -1208,7 +1201,7 @@
 		if (bam_adaptive_timer_enabled) {
 			usleep_range(rx_timer_interval, rx_timer_interval + 50);
 
-			ret = sps_get_unused_desc_num(bam_rx_pipe,
+			ret = bam_ops->sps_get_unused_desc_num_ptr(bam_rx_pipe,
 						&buffs_unused);
 
 			if (ret) {
@@ -1260,11 +1253,11 @@
 		if (!pkt->is_cmd)
 			dma_unmap_single(NULL, pkt->dma_address,
 						pkt->skb->len,
-						DMA_TO_DEVICE);
+						bam_ops->dma_to);
 		else
 			dma_unmap_single(NULL, pkt->dma_address,
 						pkt->len,
-						DMA_TO_DEVICE);
+						bam_ops->dma_to);
 		queue_work(bam_mux_tx_workqueue, &pkt->work);
 		break;
 	default:
@@ -1287,7 +1280,8 @@
 	case SPS_EVENT_EOT:
 		/* attempt to disable interrupts in this pipe */
 		if (!polling_mode) {
-			ret = sps_get_config(bam_rx_pipe, &cur_rx_conn);
+			ret = bam_ops->sps_get_config_ptr(bam_rx_pipe,
+					&cur_rx_conn);
 			if (ret) {
 				pr_err("%s: sps_get_config() failed %d, interrupts"
 					" not disabled\n", __func__, ret);
@@ -1295,7 +1289,8 @@
 			}
 			cur_rx_conn.options = SPS_O_AUTO_ENABLE |
 				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
-			ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
+			ret = bam_ops->sps_set_config_ptr(bam_rx_pipe,
+					&cur_rx_conn);
 			if (ret) {
 				pr_err("%s: sps_set_config() failed %d, interrupts"
 					" not disabled\n", __func__, ret);
@@ -1475,9 +1470,11 @@
 
 	bam_dmux_uplink_vote = vote;
 	if (vote)
-		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
+		bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
+			0, SMSM_A2_POWER_CONTROL);
 	else
-		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+		bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
+			SMSM_A2_POWER_CONTROL, 0);
 }
 
 /*
@@ -1746,29 +1743,31 @@
 	if (!power_management_only_mode) {
 		if (ssr_skipped_disconnect) {
 			/* delayed to here to prevent bus stall */
-			sps_disconnect(bam_tx_pipe);
-			sps_disconnect(bam_rx_pipe);
+			bam_ops->sps_disconnect_ptr(bam_tx_pipe);
+			bam_ops->sps_disconnect_ptr(bam_rx_pipe);
 			__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
 			__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
 		}
 		ssr_skipped_disconnect = 0;
-		i = sps_device_reset(a2_device_handle);
+		i = bam_ops->sps_device_reset_ptr(a2_device_handle);
 		if (i)
 			pr_err("%s: device reset failed rc = %d\n", __func__,
 									i);
-		i = sps_connect(bam_tx_pipe, &tx_connection);
+		i = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection);
 		if (i)
 			pr_err("%s: tx connection failed rc = %d\n", __func__,
 									i);
-		i = sps_connect(bam_rx_pipe, &rx_connection);
+		i = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection);
 		if (i)
 			pr_err("%s: rx connection failed rc = %d\n", __func__,
 									i);
-		i = sps_register_event(bam_tx_pipe, &tx_register_event);
+		i = bam_ops->sps_register_event_ptr(bam_tx_pipe,
+				&tx_register_event);
 		if (i)
 			pr_err("%s: tx event reg failed rc = %d\n", __func__,
 									i);
-		i = sps_register_event(bam_rx_pipe, &rx_register_event);
+		i = bam_ops->sps_register_event_ptr(bam_rx_pipe,
+				&rx_register_event);
 		if (i)
 			pr_err("%s: rx event reg failed rc = %d\n", __func__,
 									i);
@@ -1822,9 +1821,9 @@
 	if (!power_management_only_mode) {
 		if (likely(!in_ssr)) {
 			BAM_DMUX_LOG("%s: disconnect tx\n", __func__);
-			sps_disconnect(bam_tx_pipe);
+			bam_ops->sps_disconnect_ptr(bam_tx_pipe);
 			BAM_DMUX_LOG("%s: disconnect rx\n", __func__);
-			sps_disconnect(bam_rx_pipe);
+			bam_ops->sps_disconnect_ptr(bam_rx_pipe);
 			__memzero(rx_desc_mem_buf.base, rx_desc_mem_buf.size);
 			__memzero(tx_desc_mem_buf.base, tx_desc_mem_buf.size);
 			BAM_DMUX_LOG("%s: device reset\n", __func__);
@@ -1841,7 +1840,7 @@
 		list_del(node);
 		info = container_of(node, struct rx_pkt_info, list_node);
 		dma_unmap_single(NULL, info->dma_address, BUFFER_SIZE,
-							DMA_FROM_DEVICE);
+							bam_ops->dma_from);
 		dev_kfree_skb_any(info->skb);
 		kfree(info);
 	}
@@ -2007,12 +2006,12 @@
 		if (!info->is_cmd) {
 			dma_unmap_single(NULL, info->dma_address,
 						info->skb->len,
-						DMA_TO_DEVICE);
+						bam_ops->dma_to);
 			dev_kfree_skb_any(info->skb);
 		} else {
 			dma_unmap_single(NULL, info->dma_address,
 						info->len,
-						DMA_TO_DEVICE);
+						bam_ops->dma_to);
 			kfree(info->skb);
 		}
 		kfree(info);
@@ -2050,20 +2049,20 @@
 	if (cpu_is_msm9615())
 		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
 	/* need to free on tear down */
-	ret = sps_register_bam_device(&a2_props, &h);
+	ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h);
 	if (ret < 0) {
 		pr_err("%s: register bam error %d\n", __func__, ret);
 		goto register_bam_failed;
 	}
 	a2_device_handle = h;
 
-	bam_tx_pipe = sps_alloc_endpoint();
+	bam_tx_pipe = bam_ops->sps_alloc_endpoint_ptr();
 	if (bam_tx_pipe == NULL) {
 		pr_err("%s: tx alloc endpoint failed\n", __func__);
 		ret = -ENOMEM;
 		goto tx_alloc_endpoint_failed;
 	}
-	ret = sps_get_config(bam_tx_pipe, &tx_connection);
+	ret = bam_ops->sps_get_config_ptr(bam_tx_pipe, &tx_connection);
 	if (ret) {
 		pr_err("%s: tx get config failed %d\n", __func__, ret);
 		goto tx_get_config_failed;
@@ -2088,19 +2087,19 @@
 	tx_connection.desc = tx_desc_mem_buf;
 	tx_connection.event_thresh = 0x10;
 
-	ret = sps_connect(bam_tx_pipe, &tx_connection);
+	ret = bam_ops->sps_connect_ptr(bam_tx_pipe, &tx_connection);
 	if (ret < 0) {
 		pr_err("%s: tx connect error %d\n", __func__, ret);
 		goto tx_connect_failed;
 	}
 
-	bam_rx_pipe = sps_alloc_endpoint();
+	bam_rx_pipe = bam_ops->sps_alloc_endpoint_ptr();
 	if (bam_rx_pipe == NULL) {
 		pr_err("%s: rx alloc endpoint failed\n", __func__);
 		ret = -ENOMEM;
 		goto rx_alloc_endpoint_failed;
 	}
-	ret = sps_get_config(bam_rx_pipe, &rx_connection);
+	ret = bam_ops->sps_get_config_ptr(bam_rx_pipe, &rx_connection);
 	if (ret) {
 		pr_err("%s: rx get config failed %d\n", __func__, ret);
 		goto rx_get_config_failed;
@@ -2126,7 +2125,7 @@
 	rx_connection.desc = rx_desc_mem_buf;
 	rx_connection.event_thresh = 0x10;
 
-	ret = sps_connect(bam_rx_pipe, &rx_connection);
+	ret = bam_ops->sps_connect_ptr(bam_rx_pipe, &rx_connection);
 	if (ret < 0) {
 		pr_err("%s: rx connect error %d\n", __func__, ret);
 		goto rx_connect_failed;
@@ -2137,7 +2136,7 @@
 	tx_register_event.xfer_done = NULL;
 	tx_register_event.callback = bam_mux_tx_notify;
 	tx_register_event.user = NULL;
-	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
+	ret = bam_ops->sps_register_event_ptr(bam_tx_pipe, &tx_register_event);
 	if (ret < 0) {
 		pr_err("%s: tx register event error %d\n", __func__, ret);
 		goto rx_event_reg_failed;
@@ -2148,7 +2147,7 @@
 	rx_register_event.xfer_done = NULL;
 	rx_register_event.callback = bam_mux_rx_notify;
 	rx_register_event.user = NULL;
-	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
+	ret = bam_ops->sps_register_event_ptr(bam_rx_pipe, &rx_register_event);
 	if (ret < 0) {
 		pr_err("%s: tx register event error %d\n", __func__, ret);
 		goto rx_event_reg_failed;
@@ -2168,22 +2167,22 @@
 	return 0;
 
 rx_event_reg_failed:
-	sps_disconnect(bam_rx_pipe);
+	bam_ops->sps_disconnect_ptr(bam_rx_pipe);
 rx_connect_failed:
 	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
 				rx_desc_mem_buf.phys_base);
 rx_mem_failed:
 rx_get_config_failed:
-	sps_free_endpoint(bam_rx_pipe);
+	bam_ops->sps_free_endpoint_ptr(bam_rx_pipe);
 rx_alloc_endpoint_failed:
-	sps_disconnect(bam_tx_pipe);
+	bam_ops->sps_disconnect_ptr(bam_tx_pipe);
 tx_connect_failed:
 	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
 				tx_desc_mem_buf.phys_base);
 tx_get_config_failed:
-	sps_free_endpoint(bam_tx_pipe);
+	bam_ops->sps_free_endpoint_ptr(bam_tx_pipe);
 tx_alloc_endpoint_failed:
-	sps_deregister_bam_device(h);
+	bam_ops->sps_deregister_bam_device_ptr(h);
 	/*
 	 * sps_deregister_bam_device() calls iounmap.  calling iounmap on the
 	 * same handle below will cause a crash, so skip it if we've freed
@@ -2221,7 +2220,7 @@
 	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
 	if (cpu_is_msm9615())
 		a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
-	ret = sps_register_bam_device(&a2_props, &h);
+	ret = bam_ops->sps_register_bam_device_ptr(&a2_props, &h);
 	if (ret < 0) {
 		pr_err("%s: register bam error %d\n", __func__, ret);
 		goto register_bam_failed;
@@ -2273,7 +2272,7 @@
 
 	BAM_DMUX_LOG("%s: apps ack %d->%d\n", __func__,
 			clear_bit & 0x1, ~clear_bit & 0x1);
-	smsm_change_state(SMSM_APPS_STATE,
+	bam_ops->smsm_change_state_ptr(SMSM_APPS_STATE,
 				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
 				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
 	clear_bit = ~clear_bit;
@@ -2330,6 +2329,51 @@
 	complete_all(&ul_wakeup_ack_completion);
 }
 
+/**
+ * msm_bam_dmux_set_bam_ops() - sets the bam_ops
+ * @ops: bam_ops_if to set
+ *
+ * Sets bam_ops to allow switching of runtime behavior. Precondition: bam
+ * dmux must be in an idle state. If ops is NULL, then bam_ops will be
+ * restored to its default state.
+ */
+void msm_bam_dmux_set_bam_ops(struct bam_ops_if *ops)
+{
+	if (ops != NULL)
+		bam_ops = ops;
+	else
+		bam_ops = &bam_default_ops;
+}
+EXPORT_SYMBOL(msm_bam_dmux_set_bam_ops);
+
+/**
+ * msm_bam_dmux_deinit() - puts bam dmux into a deinited state
+ *
+ * Puts bam dmux into a deinitialized state by simulating an SSR
+ * (subsystem restart).
+ */
+void msm_bam_dmux_deinit(void)
+{
+	restart_notifier_cb(NULL, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	restart_notifier_cb(NULL, SUBSYS_AFTER_SHUTDOWN, NULL);
+}
+EXPORT_SYMBOL(msm_bam_dmux_deinit);
+
+/**
+ * msm_bam_dmux_reinit() - reinitializes bam dmux
+ */
+void msm_bam_dmux_reinit(void)
+{
+	bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
+			SMSM_A2_POWER_CONTROL,
+			bam_dmux_smsm_cb, NULL);
+	bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
+			SMSM_A2_POWER_CONTROL_ACK,
+			bam_dmux_smsm_ack_cb, NULL);
+	bam_mux_initialized = 0;
+	bam_init();
+}
+EXPORT_SYMBOL(msm_bam_dmux_reinit);
+
 static int bam_dmux_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -2416,8 +2460,9 @@
 	INIT_DELAYED_WORK(&queue_rx_work, queue_rx_work_func);
 	wake_lock_init(&bam_wakelock, WAKE_LOCK_SUSPEND, "bam_dmux_wakelock");
 
-	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
-					bam_dmux_smsm_cb, NULL);
+	rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
+			SMSM_A2_POWER_CONTROL,
+			bam_dmux_smsm_cb, NULL);
 
 	if (rc) {
 		destroy_workqueue(bam_mux_rx_workqueue);
@@ -2426,13 +2471,14 @@
 		return -ENOMEM;
 	}
 
-	rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL_ACK,
-					bam_dmux_smsm_ack_cb, NULL);
+	rc = bam_ops->smsm_state_cb_register_ptr(SMSM_MODEM_STATE,
+			SMSM_A2_POWER_CONTROL_ACK,
+			bam_dmux_smsm_ack_cb, NULL);
 
 	if (rc) {
 		destroy_workqueue(bam_mux_rx_workqueue);
 		destroy_workqueue(bam_mux_tx_workqueue);
-		smsm_state_cb_deregister(SMSM_MODEM_STATE,
+		bam_ops->smsm_state_cb_deregister_ptr(SMSM_MODEM_STATE,
 					SMSM_A2_POWER_CONTROL,
 					bam_dmux_smsm_cb, NULL);
 		pr_err("%s: smsm ack cb register failed, rc: %d\n", __func__,
@@ -2442,8 +2488,10 @@
 		return -ENOMEM;
 	}
 
-	if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
-		bam_dmux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
+	if (bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE) &
+			SMSM_A2_POWER_CONTROL)
+		bam_dmux_smsm_cb(NULL, 0,
+			bam_ops->smsm_get_state_ptr(SMSM_MODEM_STATE));
 
 	return 0;
 }
diff --git a/arch/arm/mach-msm/bam_dmux_private.h b/arch/arm/mach-msm/bam_dmux_private.h
new file mode 100644
index 0000000..871dd64
--- /dev/null
+++ b/arch/arm/mach-msm/bam_dmux_private.h
@@ -0,0 +1,173 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _BAM_DMUX_PRIVATE_H
+#define _BAM_DMUX_PRIVATE_H
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/sps.h>
+
+#define BAM_MUX_HDR_MAGIC_NO			0x33fc
+#define BAM_MUX_HDR_CMD_DATA			0
+#define BAM_MUX_HDR_CMD_OPEN			1
+#define BAM_MUX_HDR_CMD_CLOSE			2
+#define BAM_MUX_HDR_CMD_STATUS			3 /* unused */
+#define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC		4
+#define BUFFER_SIZE				2048
+
+/**
+ * struct bam_ops_if - collection of function pointers to allow swappable
+ * runtime functionality
+ * @smsm_change_state_ptr: pointer to smsm_change_state function
+ * @smsm_get_state_ptr: pointer to smsm_get_state function
+ * @smsm_state_cb_register_ptr: pointer to smsm_state_cb_register function
+ * @smsm_state_cb_deregister_ptr: pointer to smsm_state_cb_deregister function
+ * @sps_connect_ptr: pointer to sps_connect function
+ * @sps_disconnect_ptr: pointer to sps_disconnect function
+ * @sps_register_bam_device_ptr: pointer to sps_register_bam_device
+ * @sps_deregister_bam_device_ptr: pointer to sps_deregister_bam_device
+ * function
+ * @sps_alloc_endpoint_ptr: pointer to sps_alloc_endpoint function
+ * @sps_free_endpoint_ptr: pointer to sps_free_endpoint function
+ * @sps_set_config_ptr: pointer to sps_set_config function
+ * @sps_get_config_ptr: pointer to sps_get_config function
+ * @sps_device_reset_ptr: pointer to sps_device_reset function
+ * @sps_register_event_ptr: pointer to sps_register_event function
+ * @sps_transfer_one_ptr: pointer to sps_transfer_one function
+ * @sps_get_iovec_ptr: pointer to sps_get_iovec function
+ * @sps_get_unused_desc_num_ptr: pointer to sps_get_unused_desc_num function
+ * @dma_to: enum for the direction of dma operations to device
+ * @dma_from: enum for the direction of dma operations from device
+ *
+ * This struct contains the interface from bam_dmux to smsm and sps. The
+ * pointers can be swapped out at run time to provide different functionality.
+ */
+struct bam_ops_if {
+	/* smsm */
+	int (*smsm_change_state_ptr)(uint32_t smsm_entry,
+		uint32_t clear_mask, uint32_t set_mask);
+
+	uint32_t (*smsm_get_state_ptr)(uint32_t smsm_entry);
+
+	int (*smsm_state_cb_register_ptr)(uint32_t smsm_entry, uint32_t mask,
+		void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+		void *data);
+
+	int (*smsm_state_cb_deregister_ptr)(uint32_t smsm_entry, uint32_t mask,
+		void (*notify)(void *, uint32_t, uint32_t), void *data);
+
+	/* sps */
+	int (*sps_connect_ptr)(struct sps_pipe *h, struct sps_connect *connect);
+
+	int (*sps_disconnect_ptr)(struct sps_pipe *h);
+
+	int (*sps_register_bam_device_ptr)(
+		const struct sps_bam_props *bam_props,
+		u32 *dev_handle);
+
+	int (*sps_deregister_bam_device_ptr)(u32 dev_handle);
+
+	struct sps_pipe *(*sps_alloc_endpoint_ptr)(void);
+
+	int (*sps_free_endpoint_ptr)(struct sps_pipe *h);
+
+	int (*sps_set_config_ptr)(struct sps_pipe *h,
+		struct sps_connect *config);
+
+	int (*sps_get_config_ptr)(struct sps_pipe *h,
+		struct sps_connect *config);
+
+	int (*sps_device_reset_ptr)(u32 dev);
+
+	int (*sps_register_event_ptr)(struct sps_pipe *h,
+		struct sps_register_event *reg);
+
+	int (*sps_transfer_one_ptr)(struct sps_pipe *h,
+		u32 addr, u32 size,
+		void *user, u32 flags);
+
+	int (*sps_get_iovec_ptr)(struct sps_pipe *h,
+		struct sps_iovec *iovec);
+
+	int (*sps_get_unused_desc_num_ptr)(struct sps_pipe *h,
+		u32 *desc_num);
+
+	enum dma_data_direction dma_to;
+
+	enum dma_data_direction dma_from;
+};
+
+/**
+ * struct bam_mux_hdr - struct which contains bam dmux header info
+ * @magic_num: magic number placed at start to ensure that it is actually a
+ * valid bam dmux header
+ * @reserved: for later use
+ * @cmd: the command
+ * @pad_len: the length of padding
+ * @ch_id: the id of the bam dmux channel that this is sent on
+ * @pkt_len: the length of the packet that this is the header of
+ */
+struct bam_mux_hdr {
+	uint16_t magic_num;
+	uint8_t reserved;
+	uint8_t cmd;
+	uint8_t pad_len;
+	uint8_t ch_id;
+	uint16_t pkt_len;
+};
+
+/**
+ * struct rx_pkt_info - struct describing an rx packet
+ * @skb: socket buffer containing the packet
+ * @dma_address: dma mapped address of the packet
+ * @work: work_struct for processing the packet
+ * @list_node: list_head for placing this on a list
+ */
+struct rx_pkt_info {
+	struct sk_buff *skb;
+	dma_addr_t dma_address;
+	struct work_struct work;
+	struct list_head list_node;
+};
+
+/**
+ * struct tx_pkt_info - struct describing a tx packet
+ * @skb: socket buffer containing the packet
+ * @dma_address: dma mapped address of the packet
+ * @is_cmd: signifies whether this is a command or data packet
+ * @len: length of the packet
+ * @work: work_struct for processing this packet
+ * @list_node: list_head for placing this on a list
+ * @ts_sec: seconds portion of the timestamp
+ * @ts_nsec: nanoseconds portion of the timestamp
+ *
+ */
+struct tx_pkt_info {
+	struct sk_buff *skb;
+	dma_addr_t dma_address;
+	char is_cmd;
+	uint32_t len;
+	struct work_struct work;
+	struct list_head list_node;
+	unsigned ts_sec;
+	unsigned long ts_nsec;
+};
+
+void msm_bam_dmux_set_bam_ops(struct bam_ops_if *ops);
+
+void msm_bam_dmux_deinit(void);
+
+void msm_bam_dmux_reinit(void);
+
+#endif /* _BAM_DMUX_PRIVATE_H */