msm: acpuclock-krait: Pre-emptively vote for the L2 HFPLL regulators
The L2 level computation and the setting of the L2 rate must happen
atomically, so that the computed level is still valid when the L2
rate is set. A spinlock-protected critical section is needed for
these operations. However, voting for the HFPLL regulators requires
calling the RPM regulator APIs, which must be done from a non-atomic
context.
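For illustration, the ordering that this constraint rules out would
look roughly like the following (a condensed sketch using the
driver's existing helpers, not code from this patch):

	spin_lock(&l2_lock);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	/*
	 * Invalid: enable_rpm_vreg() goes through the RPM regulator
	 * API, which may sleep and so must not be called under a
	 * spinlock.
	 */
	enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
	set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed, false);
	spin_unlock(&l2_lock);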
To solve this, we must first note that the problem only exists
when:
1. The L2 frequency is being switched from an HFPLL source to a
   non-HFPLL source, or from a non-HFPLL source to an HFPLL source.
--AND--
2. The CPU frequency switch (the cause of the L2 switch) is
being performed as a result of cpufreq invoking the switch or
hotplug onlining/offlining a core, since HFPLL regulator
voting does not take place in the power-collapse and resume
paths.
The solution is to pre-emptively vote for the L2 HFPLL regulators
if the target CPU frequency's required L2 level is sourced off of
an HFPLL. These votes are removed only after the spinlock-protected
critical section completes, and only if the previous CPU frequency's
required L2 level was already sourced off of an HFPLL.
One further optimization is to disable only pre-emption, and not
interrupts, in the aforementioned critical section.
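Condensed from the hunks below (error handling and the setrate
reason checks omitted), the resulting flow in
acpuclk_krait_set_rate() is:

	/* Remember the L2 source the previous CPU frequency voted for. */
	prev_l2_src = drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;

	/* Pre-emptive vote, taken while still in sleepable context. */
	if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL)
		enable_l2_regulators();

	spin_lock(&l2_lock);	/* disables pre-emption only */
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed, true);
	spin_unlock(&l2_lock);

	/* Drop the old vote only if the L2 was already on an HFPLL. */
	if (prev_l2_src == HFPLL)
		disable_l2_regulators();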
Change-Id: I7b01f274f773ce513300ed3e4b074bae63e84c64
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index b3e6145..64b162e 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -437,6 +437,47 @@
return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
+static DEFINE_MUTEX(l2_regulator_lock);
+static int l2_vreg_count;
+
+static int enable_l2_regulators(void)
+{
+ int ret = 0;
+
+ mutex_lock(&l2_regulator_lock);
+ if (l2_vreg_count == 0) {
+ ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
+ if (ret)
+ goto out;
+ ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
+ if (ret) {
+ disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
+ goto out;
+ }
+ }
+ l2_vreg_count++;
+out:
+ mutex_unlock(&l2_regulator_lock);
+
+ return ret;
+}
+
+static void disable_l2_regulators(void)
+{
+ mutex_lock(&l2_regulator_lock);
+
+ if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
+ goto out;
+
+ if (l2_vreg_count == 1) {
+ disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
+ disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
+ }
+ l2_vreg_count--;
+out:
+ mutex_unlock(&l2_regulator_lock);
+}
+
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
enum setrate_reason reason)
@@ -444,8 +485,8 @@
const struct core_speed *strt_acpu_s, *tgt_acpu_s;
const struct acpu_level *tgt;
int tgt_l2_l;
+ enum src_id prev_l2_src = NUM_SRC_ID;
struct vdd_data vdd_data;
- unsigned long flags;
bool skip_regulators;
int rc = 0;
@@ -490,6 +531,15 @@
rc = increase_vdd(cpu, &vdd_data, reason);
if (rc)
goto out;
+
+ prev_l2_src =
+ drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
+ /* Vote for the L2 regulators here if necessary. */
+ if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
+ rc = enable_l2_regulators();
+ if (rc)
+ goto out;
+ }
}
dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
@@ -514,16 +564,23 @@
* called from an atomic context and the driver_lock mutex is not
* acquired.
*/
- spin_lock_irqsave(&l2_lock, flags);
+ spin_lock(&l2_lock);
tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
- set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed,
- skip_regulators);
- spin_unlock_irqrestore(&l2_lock, flags);
+ set_speed(&drv.scalable[L2],
+ &drv.l2_freq_tbl[tgt_l2_l].speed, true);
+ spin_unlock(&l2_lock);
/* Nothing else to do for power collapse or SWFI. */
if (reason == SETRATE_PC || reason == SETRATE_SWFI)
goto out;
+ /*
+ * Remove the vote for the L2 HFPLL regulators only if the L2
+ * was already on an HFPLL source.
+ */
+ if (prev_l2_src == HFPLL)
+ disable_l2_regulators();
+
/* Update bus bandwith request. */
set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);
@@ -685,6 +742,14 @@
goto err_core_conf;
}
+ /*
+ * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
+ * requires a corresponding target L2 frequency that needs the L2 to
+ * run off of an HFPLL.
+ */
+ if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
+ l2_vreg_count++;
+
return 0;
err_core_conf:
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index 4b6834d..4db95b3 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -39,6 +39,7 @@
PLL_0 = 0,
HFPLL,
PLL_8,
+ NUM_SRC_ID,
};
/**