msm: qdss: restore trace state for cores early during hotplug turn on

Earlier, we relied on non-boot cores going through their first idle
WFI or idle power collapse after hotplug turn on to restore the trace
state saved at the previous hotplug turn off.

This could take a long time (losing trace information in the
interim), more so now that we don't save and restore trace state
across WFI. The state might also never get restored if idle power
collapse is disabled for non-boot cores.

We now restore the trace state for non-boot cores early, as part of
hotplug turn on itself, to alleviate the above pitfalls.
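
A minimal sketch of how the early restore could be hooked into the
hotplug turn on path (illustrative only: the notifier and its
registration site are assumptions, not part of this patch; only
etm_restore_reg_check() is taken from the driver):

	/*
	 * Hypothetical sketch: CPU_STARTING runs on the incoming core
	 * itself, so smp_processor_id() inside etm_restore_reg_check()
	 * resolves to the core being brought up and its ETM state is
	 * restored before it does meaningful work.
	 */
	static int __cpuinit ptm_cpu_callback(struct notifier_block *nfb,
					      unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_STARTING:
		case CPU_STARTING_FROZEN:
			etm_restore_reg_check();
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block __cpuinitdata ptm_cpu_notifier = {
		.notifier_call = ptm_cpu_callback,
	};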

Signed-off-by: Pratik Patel <pratikp@codeaurora.org>
diff --git a/arch/arm/mach-msm/qdss-ptm.c b/arch/arm/mach-msm/qdss-ptm.c
index ffd0b8d..28dd171 100644
--- a/arch/arm/mach-msm/qdss-ptm.c
+++ b/arch/arm/mach-msm/qdss-ptm.c
@@ -21,7 +21,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
 #include <linux/wakelock.h>
 #include <linux/pm_qos_params.h>
 #include <linux/clk.h>
@@ -188,7 +187,6 @@
 	void __iomem			*base;
 	uint32_t			*state;
 	bool				trace_enabled;
-	int				*cpu_restore;
 	struct wake_lock		wake_lock;
 	struct pm_qos_request_list	qos_req;
 	atomic_t			in_use;
@@ -371,8 +369,12 @@
 		return ret;
 
 	wake_lock(&ptm.wake_lock);
-	/* 1. causes all cpus to come out of idle PC
+	/* 1. causes all online cpus to come out of idle PC
 	 * 2. prevents idle PC until save restore flag is enabled atomically
+	 *
+	 * we rely on the user to prevent hotplug on/off from racing with
+	 * this operation and to ensure that the cores on which trace is to
+	 * be turned on are already hotplugged on
 	 */
 	pm_qos_update_request(&ptm.qos_req, 0);
 
@@ -411,16 +413,16 @@
 
 static void ptm_trace_disable(void)
 {
-	int cpu;
-
 	wake_lock(&ptm.wake_lock);
-	/* 1. causes all cpus to come out of idle PC
+	/* 1. causes all online cpus to come out of idle PC
 	 * 2. prevents idle PC until save restore flag is disabled atomically
+	 *
+	 * we rely on the user to prevent hotplug on/off from racing with
+	 * this operation and to ensure that the cores on which trace is to
+	 * be turned off are already hotplugged on
 	 */
 	pm_qos_update_request(&ptm.qos_req, 0);
 
-	ptm_os_unlock(NULL);
-	smp_call_function(ptm_os_unlock, NULL, 1);
 	__ptm_trace_disable();
 	etb_dump();
 	etb_disable();
@@ -428,9 +430,6 @@
 
 	ptm.trace_enabled = false;
 
-	for_each_online_cpu(cpu)
-		*per_cpu_ptr(ptm.cpu_restore, cpu) = 0;
-
 	pm_qos_update_request(&ptm.qos_req, PM_QOS_DEFAULT_VALUE);
 	wake_unlock(&ptm.wake_lock);
 
@@ -715,40 +714,24 @@
  *
  * Another assumption is that etm registers won't change after trace_enabled
  * is set. Current usage model guarantees this doesn't happen.
+ *
+ * Also, disabling all types of power collapse when enabling and disabling
+ * trace provides the mutual exclusion needed to safely access
+ * ptm.trace_enabled here.
  */
 void etm_save_reg_check(void)
 {
-	/* Disabling all kinds of power_collapses when enabling and disabling
-	 * trace provides mutual exclusion to be able to safely access
-	 * ptm.trace_enabled here.
-	 */
 	if (ptm.trace_enabled) {
 		int cpu = smp_processor_id();
-
-		/* Don't save the registers if we just got called from per_cpu
-		 * idle thread context of a nonboot cpu after hotplug/suspend
-		 * power collapse. This is to prevent corruption due to saving
-		 * twice since nonboot cpus start out fresh without the
-		 * corresponding restore.
-		 */
-		if (!(*per_cpu_ptr(ptm.cpu_restore, cpu))) {
-			ptm_save_reg(cpu);
-			*per_cpu_ptr(ptm.cpu_restore, cpu) = 1;
-		}
+		ptm_save_reg(cpu);
 	}
 }
 
 void etm_restore_reg_check(void)
 {
-	/* Disabling all kinds of power_collapses when enabling and disabling
-	 * trace provides mutual exclusion to be able to safely access
-	 * ptm.trace_enabled here.
-	 */
 	if (ptm.trace_enabled) {
 		int cpu = smp_processor_id();
-
 		ptm_restore_reg(cpu);
-		*per_cpu_ptr(ptm.cpu_restore, cpu) = 0;
 	}
 }
 
@@ -825,7 +808,7 @@
 
 static int __devinit ptm_probe(struct platform_device *pdev)
 {
-	int ret, cpu;
+	int ret;
 	struct resource *res;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -913,15 +896,6 @@
 
 	ptm.trace_enabled = false;
 
-	ptm.cpu_restore = alloc_percpu(int);
-	if (!ptm.cpu_restore) {
-		ret = -ENOMEM;
-		goto err_percpu;
-	}
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(ptm.cpu_restore, cpu) = 0;
-
 	wake_lock_init(&ptm.wake_lock, WAKE_LOCK_SUSPEND, "msm_ptm");
 	pm_qos_add_request(&ptm.qos_req, PM_QOS_CPU_DMA_LATENCY,
 						PM_QOS_DEFAULT_VALUE);
@@ -944,8 +918,6 @@
 
 	return 0;
 
-err_percpu:
-	clk_disable(ptm.qdss_tsctr_clk);
 err_tsctr_enable:
 err_tsctr_rate:
 	clk_put(ptm.qdss_tsctr_clk);
@@ -984,7 +956,6 @@
 		ptm_trace_disable();
 	pm_qos_remove_request(&ptm.qos_req);
 	wake_lock_destroy(&ptm.wake_lock);
-	free_percpu(ptm.cpu_restore);
 	clk_put(ptm.qdss_tsctr_clk);
 	clk_put(ptm.qdss_traceclkin_clk);
 	clk_put(ptm.qdss_pclk);
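
For context, a hedged sketch of the idle power collapse usage the
driver already relies on (msm_pm_power_collapse() below is an
illustrative stand-in for the platform idle path, not code from this
patch; only the etm_*_reg_check() hooks are real):

	/*
	 * Illustrative assumption: the platform idle power collapse path
	 * brackets the collapse with these hooks, which is why pinning
	 * the pm_qos latency request to 0 during trace enable/disable is
	 * enough to serialize against them.
	 */
	static void msm_pm_power_collapse(void)
	{
		etm_save_reg_check();		/* save ETM state if tracing */
		/* ... core loses power here ... */
		etm_restore_reg_check();	/* restore on warm boot */
	}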