msm: bam_dmux: Add internal state logging

Add internal logging of state transitions to make debugging easier
while avoiding excessive kernel log output.
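
The accumulated log can be read back through a new "log" debugfs file
created alongside the driver's existing debugfs entries (the debugfs
directory itself is unchanged by this patch).  An illustrative record,
with made-up timestamp and flag values, looks like:

  <DMUX> 103.741532094 rPA VUwa ul_timeout: powerdown

where the flag groups follow the legend printed at the top of the log
output (reset/power/active, then the uplink vote/active/wait-for-ack/ack
states).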

Change-Id: Ia0e41bf30ddc40dc2316058f41b72ca43972aebb
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
Signed-off-by: Gopal G Goberu <ggober@codeaurora.org>
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index cc6f18f..6414359 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -26,6 +26,7 @@
 #include <linux/debugfs.h>
 #include <linux/clk.h>
 #include <linux/wakelock.h>
+#include <linux/kfifo.h>
 
 #include <mach/sps.h>
 #include <mach/bam_dmux.h>
@@ -119,6 +120,8 @@
 	uint32_t len;
 	struct work_struct work;
 	struct list_head list_node;
+	unsigned ts_sec;
+	unsigned long ts_nsec;
 };
 
 struct rx_pkt_info {
@@ -223,6 +226,112 @@
 #define bam_ch_is_in_reset(x)			\
 	(bam_ch[(x)].status & BAM_CH_IN_RESET)
 
+#define LOG_MESSAGE_MAX_SIZE 80
+struct kfifo bam_dmux_state_log;
+static uint32_t bam_dmux_state_logging_disabled;
+static DEFINE_SPINLOCK(bam_dmux_logging_spinlock);
+static int bam_dmux_uplink_vote;
+static int bam_dmux_power_state;
+
+
+#define DMUX_LOG_KERR(fmt...) \
+do { \
+	bam_dmux_log(fmt); \
+	pr_err(fmt); \
+} while (0)
+
+/*
+ * Log a state change along with a small message.
+ *
+ * The complete record, including the timestamp and state prefix, is
+ * limited to LOG_MESSAGE_MAX_SIZE bytes.
+ */
+static void bam_dmux_log(const char *fmt, ...)
+{
+	char buff[LOG_MESSAGE_MAX_SIZE];
+	unsigned long flags;
+	va_list arg_list;
+	unsigned long long t_now;
+	unsigned long nanosec_rem;
+	int len = 0;
+
+	if (bam_dmux_state_logging_disabled)
+		return;
+
+	t_now = sched_clock();
+	nanosec_rem = do_div(t_now, 1000000000U);
+
+	/*
+	 * States
+	 * R: 1 = in global reset
+	 * P: 1 = BAM is powered up
+	 * A: 1 = BAM initialized and ready for data
+	 *
+	 * V: 1 = Uplink vote for power
+	 * U: 1 = Uplink active
+	 * W: 1 = Uplink Wait-for-ack
+	 * A: 1 = Uplink ACK received
+	 */
+	len += scnprintf(buff, sizeof(buff),
+		"<DMUX> %u.%09lu %c%c%c %c%c%c%c ",
+		(unsigned)t_now, nanosec_rem,
+		in_global_reset ? 'R' : 'r',
+		bam_dmux_power_state ? 'P' : 'p',
+		bam_connection_is_active ? 'A' : 'a',
+		bam_dmux_uplink_vote ? 'V' : 'v',
+		bam_is_connected ?  'U' : 'u',
+		wait_for_ack ? 'W' : 'w',
+		ul_wakeup_ack_completion.done ? 'A' : 'a'
+		);
+
+	va_start(arg_list, fmt);
+	len += vscnprintf(buff + len, sizeof(buff) - len, fmt, arg_list);
+	va_end(arg_list);
+	memset(buff + len, 0x0, sizeof(buff) - len);
+
+	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
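+	/* fifo full: drop the oldest fixed-size record to make room */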
+	if (kfifo_avail(&bam_dmux_state_log) < LOG_MESSAGE_MAX_SIZE) {
+		char junk[LOG_MESSAGE_MAX_SIZE];
+		int ret;
+
+		ret = kfifo_out(&bam_dmux_state_log, junk, sizeof(junk));
+		if (ret != LOG_MESSAGE_MAX_SIZE) {
+			pr_err("%s: unable to empty log %d\n", __func__, ret);
+			spin_unlock_irqrestore(&bam_dmux_logging_spinlock,
+					flags);
+			return;
+		}
+	}
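+	/* store a full LOG_MESSAGE_MAX_SIZE record so reads stay aligned */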
+	kfifo_in(&bam_dmux_state_log, buff, sizeof(buff));
+	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
+}
+
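+/*
+ * Stamp a tx packet with the current sched_clock() time so stale entries
+ * in bam_tx_pool can be identified in the state log.
+ */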
+static inline void set_tx_timestamp(struct tx_pkt_info *pkt)
+{
+	unsigned long long t_now;
+
+	t_now = sched_clock();
+	pkt->ts_nsec = do_div(t_now, 1000000000U);
+	pkt->ts_sec = (unsigned)t_now;
+}
+
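+/*
+ * The tx pool is expected to be empty when this is called; log any
+ * packets still queued, along with the time each was submitted.
+ */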
+static inline void verify_tx_queue_is_empty(const char *func)
+{
+	unsigned long flags;
+	struct tx_pkt_info *info;
+	int reported = 0;
+
+	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
+	list_for_each_entry(info, &bam_tx_pool, list_node) {
+		if (!reported) {
+			DMUX_LOG_KERR("%s: tx pool not empty\n", func);
+			reported = 1;
+		}
+		DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
+				&info->list_node, info->ts_sec, info->ts_nsec);
+	}
+	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
+}
+
 static void queue_rx(void)
 {
 	void *ptr;
@@ -306,7 +415,8 @@
 			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
 			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
 	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
-		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
+		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
+			" reserved %d cmd %d"
 			" pad %d ch %d len %d\n", __func__,
 			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
 			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
@@ -316,7 +426,8 @@
 	}
 
 	if (rx_hdr->ch_id >= BAM_DMUX_NUM_CHANNELS) {
-		pr_err("%s: dropping invalid LCID %d reserved %d cmd %d"
+		DMUX_LOG_KERR("%s: dropping invalid LCID %d"
+			" reserved %d cmd %d"
 			" pad %d ch %d len %d\n", __func__,
 			rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
 			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
@@ -331,6 +442,8 @@
 		bam_mux_process_data(rx_skb);
 		break;
 	case BAM_MUX_HDR_CMD_OPEN:
+		bam_dmux_log("%s: opening cid %d\n", __func__,
+				rx_hdr->ch_id);
 		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
 		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
 		bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
@@ -344,6 +457,8 @@
 		break;
 	case BAM_MUX_HDR_CMD_CLOSE:
 		/* probably should drop pending write */
+		bam_dmux_log("%s: closing cid %d\n", __func__,
+				rx_hdr->ch_id);
 		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
 		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
 		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
@@ -356,10 +471,11 @@
 		dev_kfree_skb_any(rx_skb);
 		break;
 	default:
-		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
-			" pad %d ch %d len %d\n", __func__,
-			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
-			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
+		DMUX_LOG_KERR("%s: dropping invalid hdr. magic %x"
+			   " reserved %d cmd %d pad %d ch %d len %d\n",
+			__func__, rx_hdr->magic_num, rx_hdr->reserved,
+			rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
+			rx_hdr->pkt_len);
 		dev_kfree_skb_any(rx_skb);
 		queue_rx();
 		return;
@@ -392,6 +508,7 @@
 	pkt->len = len;
 	pkt->dma_address = dma_address;
 	pkt->is_cmd = 1;
+	set_tx_timestamp(pkt);
 	INIT_WORK(&pkt->work, bam_mux_write_done);
 	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
 	list_add_tail(&pkt->list_node, &bam_tx_pool);
@@ -429,12 +546,20 @@
 	info_expected = list_first_entry(&bam_tx_pool,
 			struct tx_pkt_info, list_node);
 	if (unlikely(info != info_expected)) {
-		struct list_head *node;
+		struct tx_pkt_info *errant_pkt;
 
-		pr_err("%s: bam_tx_pool mismatch .next=%p, list_node=%p\n",
-				__func__, bam_tx_pool.next, &info->list_node);
-		list_for_each(node, &bam_tx_pool)
-			pr_err("%s: node=%p\n", __func__, node);
+		DMUX_LOG_KERR("%s: bam_tx_pool mismatch .next=%p,"
+				" list_node=%p, ts=%u.%09lu\n",
+				__func__, bam_tx_pool.next, &info->list_node,
+				info->ts_sec, info->ts_nsec);
+
+		list_for_each_entry(errant_pkt, &bam_tx_pool, list_node) {
+			DMUX_LOG_KERR("%s: node=%p ts=%u.%09lu\n", __func__,
+					&errant_pkt->list_node,
+					errant_pkt->ts_sec,
+					errant_pkt->ts_nsec);
+		}
 		spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
 		BUG();
 	}
@@ -550,6 +675,7 @@
 	pkt->skb = skb;
 	pkt->dma_address = dma_address;
 	pkt->is_cmd = 0;
+	set_tx_timestamp(pkt);
 	INIT_WORK(&pkt->work, bam_mux_write_done);
 	spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
 	list_add_tail(&pkt->list_node, &bam_tx_pool);
@@ -966,6 +1092,57 @@
 	return i;
 }
 
+static int debug_log(char *buff, int max, loff_t *ppos)
+{
+	unsigned long flags;
+	int i = 0;
+
+	if (bam_dmux_state_logging_disabled) {
+		if (*ppos == 0)
+			i += scnprintf(buff + i, max - i,
+					"Logging disabled\n");
+		return i;
+	}
+
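+	/* emit the flag legend only on the first read of the file */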
+	if (*ppos == 0) {
+		i += scnprintf(buff + i, max - i,
+			"<DMUX> timestamp FLAGS [Message]\n"
+			"FLAGS:\n"
+			"\tR: 1 = in global reset\n"
+			"\tP: 1 = BAM is powered up\n"
+			"\tA: 1 = BAM initialized and ready for data\n"
+			"\n"
+			"\tV: 1 = Uplink vote for power\n"
+			"\tU: 1 = Uplink active\n"
+			"\tW: 1 = Uplink Wait-for-ack\n"
+			"\tA: 1 = Uplink ACK received\n"
+				);
+		buff += i;
+	}
+
+	spin_lock_irqsave(&bam_dmux_logging_spinlock, flags);
+	while (kfifo_len(&bam_dmux_state_log)
+			&& (i + LOG_MESSAGE_MAX_SIZE) < max) {
+		int k_len;
+		k_len = kfifo_out(&bam_dmux_state_log,
+				buff, LOG_MESSAGE_MAX_SIZE);
+		if (k_len != LOG_MESSAGE_MAX_SIZE) {
+			pr_err("%s: retrieve failure %d\n", __func__, k_len);
+			break;
+		}
+
+		/* keep non-null portion of string and add line break */
+		k_len = strnlen(buff, LOG_MESSAGE_MAX_SIZE);
+		buff += k_len;
+		i += k_len;
+		if (k_len && *(buff - 1) != '\n') {
+			*buff++ = '\n';
+			++i;
+		}
+	}
+	spin_unlock_irqrestore(&bam_dmux_logging_spinlock, flags);
+
+	return i;
+}
+
 #define DEBUG_BUFMAX 4096
 static char debug_buffer[DEBUG_BUFMAX];
 
@@ -977,6 +1154,30 @@
 	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
 }
 
+static ssize_t debug_read_multiple(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
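+	/*
+	 * debug_open() stored inode->i_private in file->private_data; for
+	 * the "log" node that is the debug_log() fill function.
+	 */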
+	int (*util_func)(char *buf, int max, loff_t *) = file->private_data;
+	char *buffer;
+	int bsize;
+
+	buffer = kmalloc(count, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	bsize = util_func(buffer, count, ppos);
+
+	if (bsize >= 0) {
+		if (copy_to_user(buff, buffer, bsize)) {
+			kfree(buffer);
+			return -EFAULT;
+		}
+		*ppos += bsize;
+	}
+	kfree(buffer);
+	return bsize;
+}
+
 static int debug_open(struct inode *inode, struct file *file)
 {
 	file->private_data = inode->i_private;
@@ -989,6 +1190,11 @@
 	.open = debug_open,
 };
 
+static const struct file_operations debug_ops_multiple = {
+	.read = debug_read_multiple,
+	.open = debug_open,
+};
+
 static void debug_create(const char *name, mode_t mode,
 				struct dentry *dent,
 				int (*fill)(char *buf, int max))
@@ -1026,6 +1232,21 @@
 	queue_work(bam_mux_tx_workqueue, &kickoff_ul_wakeup);
 }
 
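+/*
+ * Single place to change the apps->A2 SMSM power-control vote so that
+ * every vote (and any duplicate vote) is captured in the state log.
+ */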
+static void power_vote(int vote)
+{
+	bam_dmux_log("%s: curr=%d, vote=%d\n", __func__,
+			bam_dmux_uplink_vote, vote);
+
+	if (bam_dmux_uplink_vote == vote)
+		bam_dmux_log("%s: warning - duplicate power vote\n", __func__);
+
+	bam_dmux_uplink_vote = vote;
+	if (vote)
+		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
+	else
+		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
+}
+
 static void ul_timeout(struct work_struct *work)
 {
 	unsigned long flags;
@@ -1040,12 +1261,13 @@
 		return;
 	}
 	if (ul_packet_written) {
-		pr_info("%s: packet written\n", __func__);
+		bam_dmux_log("%s: packet written\n", __func__);
 		ul_packet_written = 0;
 		schedule_delayed_work(&ul_timeout_work,
 				msecs_to_jiffies(UL_TIMEOUT_DELAY));
 	} else {
-		pr_info("%s: powerdown\n", __func__);
+		bam_dmux_log("%s: powerdown\n", __func__);
+		verify_tx_queue_is_empty(__func__);
 		wait_for_ack = 1;
 		INIT_COMPLETION(ul_wakeup_ack_completion);
 		smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
@@ -1060,30 +1282,35 @@
 
 	mutex_lock(&wakeup_lock);
 	if (bam_is_connected) { /* bam got connected before lock grabbed */
+		bam_dmux_log("%s: already awake\n", __func__);
 		mutex_unlock(&wakeup_lock);
 		return;
 	}
-	pr_info("%s\n", __func__);
+
 	/*
 	 * must wait for the previous power down request to have been acked
 	 * chances are it already came in and this will just fall through
 	 * instead of waiting
 	 */
 	if (wait_for_ack) {
+		bam_dmux_log("%s: waiting for previous ack\n", __func__);
 		ret = wait_for_completion_interruptible_timeout(
 					&ul_wakeup_ack_completion, HZ);
 		BUG_ON(ret == 0);
 	}
 	INIT_COMPLETION(ul_wakeup_ack_completion);
-	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
+	power_vote(1);
+	bam_dmux_log("%s: waiting for wakeup ack\n", __func__);
 	ret = wait_for_completion_interruptible_timeout(
 						&ul_wakeup_ack_completion, HZ);
 	BUG_ON(ret == 0);
+	bam_dmux_log("%s: waiting for bam connection completion\n", __func__);
 	ret = wait_for_completion_interruptible_timeout(
 						&bam_connection_completion, HZ);
 	BUG_ON(ret == 0);
 
 	bam_is_connected = 1;
+	bam_dmux_log("%s: complete\n", __func__);
 	schedule_delayed_work(&ul_timeout_work,
 				msecs_to_jiffies(UL_TIMEOUT_DELAY));
 	mutex_unlock(&wakeup_lock);
@@ -1147,6 +1374,8 @@
 		kfree(info);
 	}
 	mutex_unlock(&bam_rx_pool_mutexlock);
+
+	verify_tx_queue_is_empty(__func__);
 }
 
 static void vote_dfab(void)
@@ -1176,6 +1405,7 @@
 	if (code != SUBSYS_AFTER_SHUTDOWN)
 		return NOTIFY_DONE;
 
+	bam_dmux_log("%s: begin\n", __func__);
 	in_global_reset = 1;
 	for (i = 0; i < BAM_DMUX_NUM_CHANNELS; ++i) {
 		temp_remote_status = bam_ch_is_remote_open(i);
@@ -1210,8 +1440,8 @@
 		kfree(info);
 	}
 	spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
-	smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
-
+	power_vote(0);
+	bam_dmux_log("%s: complete\n", __func__);
 	return NOTIFY_DONE;
 }
 
@@ -1415,7 +1645,9 @@
 static void toggle_apps_ack(void)
 {
 	static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
-	pr_info("%s: clear bit: %d\n", __func__, clear_bit);
+
+	bam_dmux_log("%s: apps ack %d->%d\n", __func__,
+			clear_bit & 0x1, ~clear_bit & 0x1);
 	smsm_change_state(SMSM_APPS_STATE,
 				clear_bit & SMSM_A2_POWER_CONTROL_ACK,
 				~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
@@ -1425,19 +1657,22 @@
 static void bam_dmux_smsm_cb(void *priv, uint32_t old_state, uint32_t new_state)
 {
 	int ret = 0;
-	pr_info("%s: smsm activity 0x%08x -> 0x%08x\n", __func__, old_state,
-							new_state);
+
+	bam_dmux_power_state = new_state & SMSM_A2_POWER_CONTROL ? 1 : 0;
+	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+			new_state);
+
 	if (bam_mux_initialized && new_state & SMSM_A2_POWER_CONTROL) {
-		pr_info("%s: reconnect\n", __func__);
+		bam_dmux_log("%s: reconnect\n", __func__);
 		wake_lock(&bam_wakelock);
 		reconnect_to_bam();
 	} else if (bam_mux_initialized &&
 					!(new_state & SMSM_A2_POWER_CONTROL)) {
-		pr_info("%s: disconnect\n", __func__);
+		bam_dmux_log("%s: disconnect\n", __func__);
 		disconnect_to_bam();
 		wake_unlock(&bam_wakelock);
 	} else if (new_state & SMSM_A2_POWER_CONTROL) {
-		pr_info("%s: init\n", __func__);
+		bam_dmux_log("%s: init\n", __func__);
 		wake_lock(&bam_wakelock);
 		ret = bam_init();
 		if (ret) {
@@ -1447,6 +1682,7 @@
 						__func__, ret);
 		}
 	} else {
+		bam_dmux_log("%s: bad state change\n", __func__);
 		pr_err("%s: unsupported state change\n", __func__);
 	}
 
@@ -1455,7 +1691,8 @@
 static void bam_dmux_smsm_ack_cb(void *priv, uint32_t old_state,
 						uint32_t new_state)
 {
-	pr_info("%s: 0x%08x -> 0x%08x\n", __func__, old_state, new_state);
+	bam_dmux_log("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
+			new_state);
 	complete_all(&ul_wakeup_ack_completion);
 }
 
@@ -1548,6 +1785,7 @@
 
 static int __init bam_dmux_init(void)
 {
+	int ret;
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dent;
 
@@ -1556,8 +1794,16 @@
 		debug_create("tbl", 0444, dent, debug_tbl);
 		debug_create("ul_pkt_cnt", 0444, dent, debug_ul_pkt_cnt);
 		debug_create("stats", 0444, dent, debug_stats);
+		debugfs_create_file("log", 0444, dent, debug_log,
+				&debug_ops_multiple);
 	}
 #endif
+	ret = kfifo_alloc(&bam_dmux_state_log, PAGE_SIZE, GFP_KERNEL);
+	if (ret) {
+		pr_err("%s: failed to allocate log %d\n", __func__, ret);
+		bam_dmux_state_logging_disabled = 1;
+	}
+
 	subsys_notif_register_notifier("modem", &restart_notifier);
 	return platform_driver_register(&bam_dmux_driver);
 }