tty: n_smux: Add exponential backoff for RX buffer failures

If a client is unable to allocate a buffer for an incoming packet, the
driver currently retries once without delay and then drops the packet.
This causes problems for some clients that use flip buffers for
processing RX data.

Change the get_rx_buffer callback failure handling to perform an
exponential-backoff retry, giving the client time to free up a buffer
for processing.
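
The resulting behavior is roughly the sketch below (illustrative only;
the driver implements this with a per-channel delayed work item, and
client_has_buffer(), wait_ms(), drop(), and deliver() are placeholders):

	unsigned delay_ms = SMUX_RX_RETRY_MIN_MS;	/* 1 ms */

	while (!client_has_buffer()) {
		if (delay_ms > SMUX_RX_RETRY_MAX_MS)	/* 1024 ms */
			return drop();		/* notify SMUX_READ_FAIL */
		wait_ms(delay_ms);
		delay_ms <<= 1;			/* exponential backoff */
	}
	return deliver();			/* notify SMUX_READ_DONE */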

Change-Id: I3284824a7d5a1d8a03314eebdb4488aaf560440c
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index 633897f..5b5de03 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -31,7 +31,6 @@
 
 #define SMUX_NOTIFY_FIFO_SIZE	128
 #define SMUX_TX_QUEUE_SIZE	256
-#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
 #define SMUX_WM_LOW 2
 #define SMUX_WM_HIGH 4
 #define SMUX_PKT_LOG_SIZE 80
@@ -49,6 +48,10 @@
 /* inactivity timeout for no rx/tx activity */
 #define SMUX_INACTIVITY_TIMEOUT_MS 1000
 
+/* RX get_rx_buffer retry timeout values */
+#define SMUX_RX_RETRY_MIN_MS (1 << 0)  /* 1 ms */
+#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
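+/* each failed retry doubles the delay; past the max, the read fails */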
+
 enum {
 	MSM_SMUX_DEBUG = 1U << 0,
 	MSM_SMUX_INFO = 1U << 1,
@@ -175,6 +178,11 @@
 	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
 								int size);
 
+	/* RX Info */
+	struct list_head rx_retry_queue;
+	unsigned rx_retry_queue_cnt;
+	struct delayed_work rx_retry_work;
+
 	/* TX Info */
 	spinlock_t tx_lock_lhb2;
 	struct list_head tx_queue;
@@ -198,6 +206,19 @@
 };
 
 /**
+ * Get RX Buffer Retry structure.
+ *
+ * This is used for clients that are unable to provide an RX buffer
+ * immediately.  The structure temporarily holds the packet data while
+ * the retry is performed.
+ */
+struct smux_rx_pkt_retry {
+	struct smux_pkt_t *pkt;
+	struct list_head rx_retry_list;
+	unsigned timeout_in_ms;
+};
+
+/**
  * Receive worker data structure.
  *
  * One instance is created for every call to smux_rx_state_machine.
@@ -280,6 +301,7 @@
 static DECLARE_WORK(smux_tx_work, smux_tx_worker);
 
 static void smux_wakeup_worker(struct work_struct *work);
+static void smux_rx_retry_worker(struct work_struct *work);
 static void smux_rx_worker(struct work_struct *work);
 static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
 static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
@@ -373,6 +395,10 @@
 		ch->notify = 0;
 		ch->get_rx_buffer = 0;
 
+		INIT_LIST_HEAD(&ch->rx_retry_queue);
+		ch->rx_retry_queue_cnt = 0;
+		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
+
 		spin_lock_init(&ch->tx_lock_lhb2);
 		INIT_LIST_HEAD(&ch->tx_queue);
 		INIT_LIST_HEAD(&ch->tx_ready_list);
@@ -1224,8 +1250,8 @@
 static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
 {
 	uint8_t lcid;
-	int ret;
-	int i;
+	int ret = 0;
+	int do_retry = 0;
 	int tmp;
 	int rx_len;
 	struct smux_lch_t *ch;
@@ -1239,6 +1265,12 @@
 		goto out;
 	}
 
+	rx_len = pkt->hdr.payload_len;
+	if (rx_len == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	lcid = pkt->hdr.lcid;
 	ch = &smux_lch[lcid];
 	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
@@ -1260,62 +1292,106 @@
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 		goto out;
 	}
+
+	if (!list_empty(&ch->rx_retry_queue)) {
+		do_retry = 1;
+		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
+			/* retry queue full */
+			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+			ret = -ENOMEM;
+			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+			goto out;
+		}
+	}
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 
-	rx_len = pkt->hdr.payload_len;
-	if (rx_len == 0) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
+	if (remote_loopback) {
+		/* Echo the data back to the remote client. */
+		ack_pkt = smux_alloc_pkt();
+		if (ack_pkt) {
+			ack_pkt->hdr.lcid = lcid;
+			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
+			ack_pkt->hdr.flags = 0;
+			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
+			if (ack_pkt->hdr.payload_len) {
+				smux_alloc_pkt_payload(ack_pkt);
+				memcpy(ack_pkt->payload, pkt->payload,
+						ack_pkt->hdr.payload_len);
+			}
+			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
+			smux_tx_queue(ack_pkt, ch, 0);
+			list_channel(ch);
+		} else {
+			pr_err("%s: Remote loopack allocation failure\n",
+					__func__);
+		}
+	} else if (!do_retry) {
+		/* request buffer from client */
 		metadata.read.pkt_priv = 0;
 		metadata.read.buffer = 0;
+		tmp = ch->get_rx_buffer(ch->priv,
+				(void **)&metadata.read.pkt_priv,
+				(void **)&metadata.read.buffer,
+				rx_len);
 
-		if (!remote_loopback) {
-			tmp = ch->get_rx_buffer(ch->priv,
-					(void **)&metadata.read.pkt_priv,
-					(void **)&metadata.read.buffer,
+		if (tmp == 0 && metadata.read.buffer) {
+			/* place data into RX buffer */
+			memcpy(metadata.read.buffer, pkt->payload,
 					rx_len);
-			if (tmp == 0 && metadata.read.buffer) {
-				/* place data into RX buffer */
-				memcpy(metadata.read.buffer, pkt->payload,
-								rx_len);
-				metadata.read.len = rx_len;
-				schedule_notify(lcid, SMUX_READ_DONE,
-								&metadata);
-				ret = 0;
-				break;
-			} else if (tmp == -EAGAIN) {
-				ret = -ENOMEM;
-			} else if (tmp < 0) {
-				schedule_notify(lcid, SMUX_READ_FAIL, NULL);
-				ret = -ENOMEM;
-				break;
-			} else if (!metadata.read.buffer) {
-				pr_err("%s: get_rx_buffer() buffer is NULL\n",
-					__func__);
-				ret = -ENOMEM;
-			}
-		} else {
-			/* Echo the data back to the remote client. */
-			ack_pkt = smux_alloc_pkt();
-			if (ack_pkt) {
-				ack_pkt->hdr.lcid = lcid;
-				ack_pkt->hdr.cmd = SMUX_CMD_DATA;
-				ack_pkt->hdr.flags = 0;
-				ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
-				ack_pkt->payload = pkt->payload;
-				ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
-				smux_tx_queue(ack_pkt, ch, 0);
-				list_channel(ch);
-			} else {
-				pr_err("%s: Remote loopack allocation failure\n",
-						__func__);
-			}
+			metadata.read.len = rx_len;
+			schedule_notify(lcid, SMUX_READ_DONE,
+							&metadata);
+		} else if (tmp == -EAGAIN ||
+				(tmp == 0 && !metadata.read.buffer)) {
+			/* buffer allocation failed - add to retry queue */
+			do_retry = 1;
+		} else if (tmp < 0) {
+			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+			ret = -ENOMEM;
 		}
 	}
 
+	if (do_retry) {
+		struct smux_rx_pkt_retry *retry;
+
+		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
+		if (!retry) {
+			pr_err("%s: retry alloc failure\n", __func__);
+			ret = -ENOMEM;
+			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+			goto out;
+		}
+		INIT_LIST_HEAD(&retry->rx_retry_list);
+		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
+
+		/* keep a private copy of the packet for deferred delivery */
+		retry->pkt = smux_alloc_pkt();
+		if (!retry->pkt) {
+			kfree(retry);
+			pr_err("%s: pkt alloc failure\n", __func__);
+			ret = -ENOMEM;
+			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
+			goto out;
+		}
+		retry->pkt->hdr.lcid = lcid;
+		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
+		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
+		if (retry->pkt->hdr.payload_len) {
+			smux_alloc_pkt_payload(retry->pkt);
+			memcpy(retry->pkt->payload, pkt->payload,
+					retry->pkt->hdr.payload_len);
+		}
+
+		/* add to retry queue */
+		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
+		++ch->rx_retry_queue_cnt;
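+		/*
+		 * Kick off the retry worker only for the first queued
+		 * packet; the worker reschedules itself while packets
+		 * remain in the queue.
+		 */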
+		if (ch->rx_retry_queue_cnt == 1)
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+				msecs_to_jiffies(retry->timeout_in_ms));
+		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+	}
+
 out:
 	return ret;
 }
@@ -2022,6 +2098,23 @@
 }
 
 /**
+ * Remove RX retry packet from channel and free it.
+ *
+ * Must be called with state_lock_lhb1 locked.
+ *
+ * @ch    Channel for retry packet
+ * @retry Retry packet to remove
+ */
+void smux_remove_rx_retry(struct smux_lch_t *ch,
+		struct smux_rx_pkt_retry *retry)
+{
+	list_del(&retry->rx_retry_list);
+	--ch->rx_retry_queue_cnt;
+	smux_free_pkt(retry->pkt);
+	kfree(retry);
+}
+
+/**
  * RX worker handles all receive operations.
  *
  * @work  Work structure contained in TBD structure
@@ -2077,6 +2170,95 @@
 }
 
 /**
+ * RX Retry worker handles retrying get_rx_buffer calls that previously failed
+ * because the client was not ready (-EAGAIN).
+ *
+ * @work  Work structure contained in smux_lch_t structure
+ */
+static void smux_rx_retry_worker(struct work_struct *work)
+{
+	struct smux_lch_t *ch;
+	struct smux_rx_pkt_retry *retry;
+	union notifier_metadata metadata;
+	int tmp;
+	unsigned long flags;
+
+	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
+
+	/* get next retry packet */
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
+		/* port has been closed - remove all retries */
+		while (!list_empty(&ch->rx_retry_queue)) {
+			retry = list_first_entry(&ch->rx_retry_queue,
+						struct smux_rx_pkt_retry,
+						rx_retry_list);
+			smux_remove_rx_retry(ch, retry);
+		}
+	}
+
+	if (list_empty(&ch->rx_retry_queue)) {
+		SMUX_DBG("%s: retry list empty for channel %d\n",
+				__func__, ch->lcid);
+		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+		return;
+	}
+	retry = list_first_entry(&ch->rx_retry_queue,
+					struct smux_rx_pkt_retry,
+					rx_retry_list);
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+	SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+	metadata.read.pkt_priv = 0;
+	metadata.read.buffer = 0;
+	tmp = ch->get_rx_buffer(ch->priv,
+			(void **)&metadata.read.pkt_priv,
+			(void **)&metadata.read.buffer,
+			retry->pkt->hdr.payload_len);
+	if (tmp == 0 && metadata.read.buffer) {
+		/* have valid RX buffer */
+		memcpy(metadata.read.buffer, retry->pkt->payload,
+						retry->pkt->hdr.payload_len);
+		metadata.read.len = retry->pkt->hdr.payload_len;
+
+		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+		smux_remove_rx_retry(ch, retry);
+		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+	} else if (tmp == -EAGAIN ||
+			(tmp == 0 && !metadata.read.buffer)) {
+		/* retry again */
+		retry->timeout_in_ms <<= 1;
+		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
+			/* timed out */
+			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+			smux_remove_rx_retry(ch, retry);
+			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+		}
+	} else {
+		/* client error - drop packet */
+		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+		smux_remove_rx_retry(ch, retry);
+		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+
+		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+	}
+
+	/* schedule next retry */
+	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
+	if (!list_empty(&ch->rx_retry_queue)) {
+		retry = list_first_entry(&ch->rx_retry_queue,
+						struct smux_rx_pkt_retry,
+						rx_retry_list);
+		queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+				msecs_to_jiffies(retry->timeout_in_ms));
+	}
+	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+}
+
+/**
  * Transmit worker handles serializing and transmitting packets onto the
  * underlying transport.
  *
@@ -2428,6 +2610,10 @@
 			pr_err("%s: pkt allocation failed\n", __func__);
 			ret = -ENOMEM;
 		}
+
+		/* Purge RX retry queue */
+		if (ch->rx_retry_queue_cnt)
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
 	}
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index 6bd9713..2c8819c 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -29,6 +29,9 @@
 #define SMUX_UT_ECHO_ACK_OK 0xF1
 #define SMUX_UT_ECHO_ACK_FAIL 0xF2
 
+/* Maximum number of packets in retry queue */
+#define SMUX_RX_RETRY_MAX_PKTS 32
+
 struct tty_struct;
 
 /* Packet header. */
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index e5cdcbd..62e9465 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -75,9 +75,44 @@
 	} \
 	do {} while (0)
 
+/**
+ * In-range unit test assertion for test cases.
+ *
+ * @a lval
+ * @minv Minimum value
+ * @maxv Maximum value
+ *
+ * Assertion fails if @a is not in the inclusive range [@minv, @maxv]
+ * ((@a < @minv) or (@a > @maxv)).  In the failure case, the macro
+ * logs the function and line number where the error occurred along
+ * with the values of @a, @minv, and @maxv.
+ *
+ * Assumes that the following local variables exist:
+ * @buf - buffer to write failure message to
+ * @i - number of bytes written to buffer
+ * @max - maximum size of the buffer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
+	if (((a) < (minv)) || ((a) > (maxv))) { \
+		i += scnprintf(buf + i, max - i, \
+			"%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
+				 #a "(%d) > " #maxv "(%d)\n", \
+				__func__, __LINE__, \
+				a, minv, a, maxv); \
+		failed = 1; \
+		break; \
+	} \
+	do {} while (0)
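+
+/*
+ * Example usage (hypothetical local variable; mirrors the retry timing
+ * checks in smux_ut_local_get_rx_buff_retry() below):
+ *
+ *	UT_ASSERT_INT_IN_RANGE(elapsed_ms, 64 - 20, 64 + 20);
+ */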
+
 static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
 					89, 144, 233};
 
+/* when 1, forces failure of get_rx_buffer_mock function */
+static int get_rx_buffer_mock_fail;
+
 /* Used for mapping local to remote TIOCM signals */
 struct tiocm_test_vector {
 	uint32_t input;
@@ -118,6 +153,13 @@
 	struct smux_meta_write meta;
 };
 
+/* Mock object metadata for get_rx_buffer failure event */
+struct mock_get_rx_buff_event {
+	struct list_head list;
+	int size;
+	unsigned long jiffies;
+};
+
 /* Mock object for all SMUX callback events */
 struct smux_mock_callback {
 	int cb_count;
@@ -140,6 +182,10 @@
 	int event_read_failed;
 	struct list_head read_events;
 
+	/* read retry data */
+	int get_rx_buff_retry_count;
+	struct list_head get_rx_buff_retry_events;
+
 	/* write event data */
 	int event_write_done;
 	int event_write_failed;
@@ -156,6 +202,7 @@
 	init_completion(&cb->cb_completion);
 	spin_lock_init(&cb->lock);
 	INIT_LIST_HEAD(&cb->read_events);
+	INIT_LIST_HEAD(&cb->get_rx_buff_retry_events);
 	INIT_LIST_HEAD(&cb->write_events);
 }
 
@@ -191,6 +238,16 @@
 		kfree(meta);
 	}
 
+	cb->get_rx_buff_retry_count = 0;
+	while (!list_empty(&cb->get_rx_buff_retry_events)) {
+		struct mock_get_rx_buff_event *meta;
+		meta = list_first_entry(&cb->get_rx_buff_retry_events,
+				struct mock_get_rx_buff_event,
+				list);
+		list_del(&meta->list);
+		kfree(meta);
+	}
+
 	cb->event_write_done = 0;
 	cb->event_write_failed = 0;
 	while (!list_empty(&cb->write_events)) {
@@ -229,6 +286,8 @@
 		"\tevent_read_done=%d\n"
 		"\tevent_read_failed=%d\n"
 		"\tread_events=%d\n"
+		"\tget_rx_retry=%d\n"
+		"\tget_rx_retry_events=%d\n"
 		"\tevent_write_done=%d\n"
 		"\tevent_write_failed=%d\n"
 		"\twrite_events=%d\n",
@@ -243,6 +302,8 @@
 		cb->event_read_done,
 		cb->event_read_failed,
 		!list_empty(&cb->read_events),
+		cb->get_rx_buff_retry_count,
+		!list_empty(&cb->get_rx_buff_retry_events),
 		cb->event_write_done,
 		cb->event_write_failed,
 		list_empty(&cb->write_events)
@@ -303,8 +364,12 @@
 		spin_lock_irqsave(&cb_data_ptr->lock, flags);
 		++cb_data_ptr->event_read_failed;
 		if (read_event_meta) {
-			read_event_meta->meta =
+			if (metadata)
+				read_event_meta->meta =
 					*(struct smux_meta_read *)metadata;
+			else
+				memset(&read_event_meta->meta, 0x0,
+						sizeof(struct smux_meta_read));
 			list_add_tail(&read_event_meta->list,
 					&cb_data_ptr->read_events);
 		}
@@ -1171,6 +1236,338 @@
 	return i;
 }
 
+/**
+ * Allocates a new buffer or returns a failure based upon the
+ * global @get_rx_buffer_mock_fail.
+ */
+static int get_rx_buffer_mock(void *priv, void **pkt_priv,
+		void **buffer, int size)
+{
+	void *rx_buf;
+	unsigned long flags;
+	struct smux_mock_callback *cb_ptr;
+
+	cb_ptr = (struct smux_mock_callback *)priv;
+	if (!cb_ptr) {
+		pr_err("%s: no callback data\n", __func__);
+		return -ENXIO;
+	}
+
+	if (get_rx_buffer_mock_fail) {
+		/* force failure and log failure event */
+		struct mock_get_rx_buff_event *meta;
+		meta = kmalloc(sizeof(struct mock_get_rx_buff_event),
+				GFP_KERNEL);
+		if (!meta) {
+			pr_err("%s: unable to allocate metadata\n", __func__);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&meta->list);
+		meta->size = size;
+		meta->jiffies = jiffies;
+
+		spin_lock_irqsave(&cb_ptr->lock, flags);
+		++cb_ptr->get_rx_buff_retry_count;
+		list_add_tail(&meta->list, &cb_ptr->get_rx_buff_retry_events);
+		++cb_ptr->cb_count;
+		complete(&cb_ptr->cb_completion);
+		spin_unlock_irqrestore(&cb_ptr->lock, flags);
+		return -EAGAIN;
+	} else {
+		rx_buf = kmalloc(size, GFP_KERNEL);
+		*pkt_priv = (void *)0x1234;
+		*buffer = rx_buf;
+		return 0;
+	}
+}
+
+/**
+ * Verify get_rx_buffer callback retry.
+ *
+ * @buf  Buffer for status message
+ * @max  Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	char try_two[] = "try 2";
+	int ret;
+	unsigned long start_j;
+	struct mock_get_rx_buff_event *event;
+	struct mock_read_event *read_event;
+	int try;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	if (!cb_initialized) {
+		cb_initialized = 1;
+		mock_cb_data_init(&cb_data);
+	}
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+				smux_mock_cb, get_rx_buffer_mock);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/*
+		 * Force get_rx_buffer failure for a single RX packet
+		 *
+		 * The get_rx_buffer calls should follow an exponential
+		 * back-off with a maximum timeout of 1024 ms after which we
+		 * will get a failure notification.
+		 *
+		 * Try   Post Delay (ms)
+		 *  0      -
+		 *  1      1
+		 *  2      2
+		 *  3      4
+		 *  4      8
+		 *  5     16
+		 *  6     32
+		 *  7     64
+		 *  8    128
+		 *  9    256
+		 * 10    512
+		 * 11   1024
+		 * 12   Fail
+		 *
+		 * All times are limited by the precision of the timer
+		 * framework, so ranges are used in the test
+		 * verification.
+		 */
+		get_rx_buffer_mock_fail = 1;
+		start_j = jiffies;
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+					test_array, sizeof(test_array));
+		UT_ASSERT_INT(ret, ==, 0);
+		ret = msm_smux_write(SMUX_TEST_LCID, (void *)2,
+					try_two, sizeof(try_two));
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* wait for RX failure event */
+		while (cb_data.event_read_failed == 0) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, 2*HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		/* verify retry attempts */
+		UT_ASSERT_INT(cb_data.get_rx_buff_retry_count, ==, 12);
+		event = list_first_entry(&cb_data.get_rx_buff_retry_events,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				0, 0 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				1, 1 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				2, 2 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				4, 4 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				8, 8 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				16, 16 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				32 - 20, 32 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				64 - 20, 64 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				128 - 20, 128 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				256 - 20, 256 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				512 - 20, 512 + 20);
+		start_j = event->jiffies;
+
+		event = list_first_entry(&event->list,
+				struct mock_get_rx_buff_event, list);
+		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
+				jiffies_to_msecs(event->jiffies - start_j));
+		UT_ASSERT_INT_IN_RANGE(
+				jiffies_to_msecs(event->jiffies - start_j),
+				1024 - 20, 1024 + 20);
+		mock_cb_data_reset(&cb_data);
+
+		/* verify 2nd pending RX packet goes through */
+		get_rx_buffer_mock_fail = 0;
+		INIT_COMPLETION(cb_data.cb_completion);
+		if (cb_data.event_read_done == 0)
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+		UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
+		UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
+		read_event = list_first_entry(&cb_data.read_events,
+				struct mock_read_event, list);
+		UT_ASSERT_PTR(read_event->meta.pkt_priv, ==, (void *)0x1234);
+		UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
+		UT_ASSERT_INT(0, ==, memcmp(read_event->meta.buffer, try_two,
+				sizeof(try_two)));
+		mock_cb_data_reset(&cb_data);
+
+		/* Test maximum retry queue size */
+		get_rx_buffer_mock_fail = 1;
+		for (try = 0; try < (SMUX_RX_RETRY_MAX_PKTS + 1); ++try) {
+			mock_cb_data_reset(&cb_data);
+			ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+						test_array, sizeof(test_array));
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ),
+				>, 0);
+		}
+
+		/* should have 32 successful rx packets and 1 failed */
+		while (cb_data.event_read_failed == 0) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, 2*HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		get_rx_buffer_mock_fail = 0;
+		while (cb_data.event_read_done < SMUX_RX_RETRY_MAX_PKTS) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, 2*HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		UT_ASSERT_INT(1, ==, cb_data.event_read_failed);
+		UT_ASSERT_INT(SMUX_RX_RETRY_MAX_PKTS, ==,
+				cb_data.event_read_done);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
 static char debug_buffer[DEBUG_BUFMAX];
 
 static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1232,6 +1629,8 @@
 	debug_create("ut_local_wm", 0444, dent, smux_ut_local_wm);
 	debug_create("ut_local_smuxld_receive_buf", 0444, dent,
 			smux_ut_local_smuxld_receive_buf);
+	debug_create("ut_local_get_rx_buff_retry", 0444, dent,
+			smux_ut_local_get_rx_buff_retry);
 
 	return 0;
 }