msm: qdss: restore trace state for cores early during hotplug turn on

Earlier we relied on non-boot cores going through their first idle WFI
or idle power collapse after hotplug turn on to restore the trace state
saved at the previous hotplug turn off.

This could take a long time (losing trace information in the interim),
more so now that we no longer save and restore trace state across WFI.
It could also leave the state unrestored altogether if idle power
collapse is disabled for non-boot cores.

We now restore the trace state for non-boot cores early on, as part of
hotplug turn on itself, to avoid the above pitfalls.

Signed-off-by: Pratik Patel <pratikp@codeaurora.org>
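---

For context, a minimal sketch of the new flow (illustrative only, not
part of the patch; the wrapper name cpu_up_restore_trace is assumed).
With CONFIG_MSM_TRACE_ACROSS_PC enabled, the warm-boot exit path in the
pm-8x60.c hunk below calls the restore hook directly, so a non-boot
core regains its trace state as soon as it is hotplugged back on:

#include "qdss.h"

/* Illustrative wrapper: the real call site is the warm-boot path in
 * pm-8x60.c below, reached when a non-boot core comes back online. */
static void cpu_up_restore_trace(void)
{
	/* Compiles away to a no-op when CONFIG_MSM_TRACE_ACROSS_PC=n,
	 * via the static inline stubs this patch adds to qdss.h. */
	etm_restore_reg_check();
}
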
diff --git a/arch/arm/mach-msm/etm.c b/arch/arm/mach-msm/etm.c
index b970786..81c0478 100644
--- a/arch/arm/mach-msm/etm.c
+++ b/arch/arm/mach-msm/etm.c
@@ -70,7 +70,6 @@
#define PROG_TIMEOUT_MS 500
static int trace_enabled;
-static int *cpu_restore;
static int cpu_to_dump;
static int next_cpu_to_dump;
static struct wake_lock etm_wake_lock;
@@ -389,7 +388,6 @@
etm_control |= ETM_CONTROL_POWERDOWN;
etm_write_reg(ETM_REG_CONTROL, etm_control);
- __cpu_enable_etm();
__cpu_disable_etb();
put_cpu();
@@ -412,9 +410,12 @@
smp_call_function(__cpu_enable_trace, NULL, 1);
put_cpu();
- /* When the smp_call returns, we are guaranteed that all online
- * cpus are out of wfi/power_collapse and won't be allowed to enter
- * again due to the pm_qos latency request above.
+ /* 1. causes all online cpus to come out of idle PC
+ * 2. prevents idle PC until save restore flag is enabled atomically
+ *
+ * we rely on the user to prevent hotplug on/off racing with this
+ * operation and to ensure cores where trace is expected to be turned
+ * on are already hotplugged on
*/
trace_enabled = 1;
@@ -424,8 +425,6 @@
static void disable_trace(void)
{
- int cpu;
-
wake_lock(&etm_wake_lock);
pm_qos_update_request(&etm_qos_req, 0);
@@ -434,15 +433,15 @@
smp_call_function(__cpu_disable_trace, NULL, 1);
put_cpu();
- /* When the smp_call returns, we are guaranteed that all online
- * cpus are out of wfi/power_collapse and won't be allowed to enter
- * again due to the pm_qos latency request above.
+ /* 1. causes all online cpus to come out of idle PC
+ * 2. prevents idle PC until save restore flag is disabled atomically
+ *
+ * we rely on the user to prevent hotplug on/off racing with this
+ * operation and to ensure cores where trace is expected to be turned
+ * off are already hotplugged on
*/
trace_enabled = 0;
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(cpu_restore, cpu) = 0;
-
cpu_to_dump = next_cpu_to_dump = 0;
pm_qos_update_request(&etm_qos_req, PM_QOS_DEFAULT_VALUE);
@@ -904,7 +903,7 @@
/* etm_save_reg_check and etm_restore_reg_check should be fast
*
* These functions will be called either from:
- * 1. per_cpu idle thread context for idle wfi and power collapses.
+ * 1. per_cpu idle thread context for idle power collapses.
* 2. per_cpu idle thread context for hotplug/suspend power collapse for
* nonboot cpus.
* 3. suspend thread context for core0.
@@ -914,33 +913,21 @@
*
* Another assumption is that etm registers won't change after trace_enabled
* is set. Current usage model guarantees this doesn't happen.
+ *
+ * Also disabling all types of power_collapses when enabling and disabling
+ * trace provides mutual exclusion to be able to safely access
+ * trace_enabled here.
*/
void etm_save_reg_check(void)
{
- if (trace_enabled) {
- int cpu = smp_processor_id();
-
- /* Don't save the registers if we just got called from per_cpu
- * idle thread context of a nonboot cpu after hotplug/suspend
- * power collapse. This is to prevent corruption due to saving
- * twice since nonboot cpus start out fresh without the
- * corresponding restore.
- */
- if (!(*per_cpu_ptr(cpu_restore, cpu))) {
- etm_save_reg();
- *per_cpu_ptr(cpu_restore, cpu) = 1;
- }
- }
+ if (trace_enabled)
+ etm_save_reg();
}
void etm_restore_reg_check(void)
{
- if (trace_enabled) {
- int cpu = smp_processor_id();
-
+ if (trace_enabled)
etm_restore_reg();
- *per_cpu_ptr(cpu_restore, cpu) = 0;
- }
}
static int __init etm_init(void)
@@ -958,13 +945,6 @@
for_each_possible_cpu(cpu)
*per_cpu_ptr(alloc_b, cpu) = &buf[cpu];
- cpu_restore = alloc_percpu(int);
- if (!cpu_restore)
- goto err2;
-
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(cpu_restore, cpu) = 0;
-
wake_lock_init(&etm_wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
pm_qos_add_request(&etm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -978,24 +958,21 @@
return 0;
-err2:
- free_percpu(alloc_b);
err1:
misc_deregister(&etm_dev);
return -ENOMEM;
}
+module_init(etm_init);
static void __exit etm_exit(void)
{
disable_trace();
pm_qos_remove_request(&etm_qos_req);
wake_lock_destroy(&etm_wake_lock);
- free_percpu(cpu_restore);
free_percpu(alloc_b);
misc_deregister(&etm_dev);
}
-
-module_init(etm_init);
module_exit(etm_exit);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("embedded trace driver");
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 9a445a5..c86fe87 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -47,6 +47,7 @@
#include "scm-boot.h"
#include "spm.h"
#include "timer.h"
+#include "qdss.h"
/******************************************************************************
* Debug Definitions
@@ -1100,6 +1101,7 @@
dev->warm_boot = 1;
return 0;
}
+ etm_restore_reg_check();
#ifdef CONFIG_VFP
vfp_reinit();
#endif
diff --git a/arch/arm/mach-msm/qdss-ptm.c b/arch/arm/mach-msm/qdss-ptm.c
index ffd0b8d..28dd171 100644
--- a/arch/arm/mach-msm/qdss-ptm.c
+++ b/arch/arm/mach-msm/qdss-ptm.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
-#include <linux/percpu.h>
#include <linux/wakelock.h>
#include <linux/pm_qos_params.h>
#include <linux/clk.h>
@@ -188,7 +187,6 @@
void __iomem *base;
uint32_t *state;
bool trace_enabled;
- int *cpu_restore;
struct wake_lock wake_lock;
struct pm_qos_request_list qos_req;
atomic_t in_use;
@@ -371,8 +369,12 @@
return ret;
wake_lock(&ptm.wake_lock);
- /* 1. causes all cpus to come out of idle PC
+ /* 1. causes all online cpus to come out of idle PC
* 2. prevents idle PC until save restore flag is enabled atomically
+ *
+ * we rely on the user to prevent hotplug on/off racing with this
+ * operation and to ensure cores where trace is expected to be turned
+ * on are already hotplugged on
*/
pm_qos_update_request(&ptm.qos_req, 0);
@@ -411,16 +413,16 @@
static void ptm_trace_disable(void)
{
- int cpu;
-
wake_lock(&ptm.wake_lock);
- /* 1. causes all cpus to come out of idle PC
+ /* 1. causes all online cpus to come out of idle PC
* 2. prevents idle PC until save restore flag is disabled atomically
+ *
+ * we rely on the user to prevent hotplug on/off racing with this
+ * operation and to ensure cores where trace is expected to be turned
+ * off are already hotplugged on
*/
pm_qos_update_request(&ptm.qos_req, 0);
- ptm_os_unlock(NULL);
- smp_call_function(ptm_os_unlock, NULL, 1);
__ptm_trace_disable();
etb_dump();
etb_disable();
@@ -428,9 +430,6 @@
ptm.trace_enabled = false;
- for_each_online_cpu(cpu)
- *per_cpu_ptr(ptm.cpu_restore, cpu) = 0;
-
pm_qos_update_request(&ptm.qos_req, PM_QOS_DEFAULT_VALUE);
wake_unlock(&ptm.wake_lock);
@@ -715,40 +714,24 @@
*
* Another assumption is that etm registers won't change after trace_enabled
* is set. Current usage model guarantees this doesn't happen.
+ *
+ * Also disabling all types of power_collapses when enabling and disabling
+ * trace provides mutual exclusion to be able to safely access
+ * ptm.trace_enabled here.
*/
void etm_save_reg_check(void)
{
- /* Disabling all kinds of power_collapses when enabling and disabling
- * trace provides mutual exclusion to be able to safely access
- * ptm.trace_enabled here.
- */
if (ptm.trace_enabled) {
int cpu = smp_processor_id();
-
- /* Don't save the registers if we just got called from per_cpu
- * idle thread context of a nonboot cpu after hotplug/suspend
- * power collapse. This is to prevent corruption due to saving
- * twice since nonboot cpus start out fresh without the
- * corresponding restore.
- */
- if (!(*per_cpu_ptr(ptm.cpu_restore, cpu))) {
- ptm_save_reg(cpu);
- *per_cpu_ptr(ptm.cpu_restore, cpu) = 1;
- }
+ ptm_save_reg(cpu);
}
}
void etm_restore_reg_check(void)
{
- /* Disabling all kinds of power_collapses when enabling and disabling
- * trace provides mutual exclusion to be able to safely access
- * ptm.trace_enabled here.
- */
if (ptm.trace_enabled) {
int cpu = smp_processor_id();
-
ptm_restore_reg(cpu);
- *per_cpu_ptr(ptm.cpu_restore, cpu) = 0;
}
}
@@ -825,7 +808,7 @@
static int __devinit ptm_probe(struct platform_device *pdev)
{
- int ret, cpu;
+ int ret;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -913,15 +896,6 @@
ptm.trace_enabled = false;
- ptm.cpu_restore = alloc_percpu(int);
- if (!ptm.cpu_restore) {
- ret = -ENOMEM;
- goto err_percpu;
- }
-
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(ptm.cpu_restore, cpu) = 0;
-
wake_lock_init(&ptm.wake_lock, WAKE_LOCK_SUSPEND, "msm_ptm");
pm_qos_add_request(&ptm.qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -944,8 +918,6 @@
return 0;
-err_percpu:
- clk_disable(ptm.qdss_tsctr_clk);
err_tsctr_enable:
err_tsctr_rate:
clk_put(ptm.qdss_tsctr_clk);
@@ -984,7 +956,6 @@
ptm_trace_disable();
pm_qos_remove_request(&ptm.qos_req);
wake_lock_destroy(&ptm.wake_lock);
- free_percpu(ptm.cpu_restore);
clk_put(ptm.qdss_tsctr_clk);
clk_put(ptm.qdss_traceclkin_clk);
clk_put(ptm.qdss_pclk);
diff --git a/arch/arm/mach-msm/qdss.h b/arch/arm/mach-msm/qdss.h
index b94e645..e4e6b0c 100644
--- a/arch/arm/mach-msm/qdss.h
+++ b/arch/arm/mach-msm/qdss.h
@@ -54,4 +54,12 @@
void funnel_enable(uint8_t id, uint32_t port_mask);
void funnel_disable(uint8_t id, uint32_t port_mask);
+#ifdef CONFIG_MSM_TRACE_ACROSS_PC
+extern void etm_save_reg_check(void);
+extern void etm_restore_reg_check(void);
+#else
+static inline void etm_save_reg_check(void) {}
+static inline void etm_restore_reg_check(void) {}
+#endif
+
#endif
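
For reference, the idle-PC exclusion both drivers rely on (per the
updated comments in enable_trace()/disable_trace() and the ptm trace
enable/disable paths) reduces to bracketing the flag update with a
pm_qos latency request. A standalone sketch using the same
pm_qos_params API as the patch; it assumes qos_req was registered at
init with pm_qos_add_request(&qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE), as both drivers do:

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list qos_req;

static void trace_flag_critical_section(void)
{
	/* A zero-latency request pulls all online cpus out of idle
	 * power collapse and keeps them out for the duration ... */
	pm_qos_update_request(&qos_req, 0);

	/* ... so trace_enabled can be flipped here without racing
	 * against etm_save_reg_check()/etm_restore_reg_check() in the
	 * idle path. Keeping hotplug on/off from racing with this
	 * remains the caller's responsibility. */

	pm_qos_update_request(&qos_req, PM_QOS_DEFAULT_VALUE);
}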