Merge "msm_fb: display: skip one vsync event after enabling mdp and vsync clk"
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index bfb3df3..64b162e 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -219,7 +219,8 @@
 }
 
 /* Set the CPU or L2 clock speed. */
-static void set_speed(struct scalable *sc, const struct core_speed *tgt_s)
+static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
+	bool skip_regulators)
 {
 	const struct core_speed *strt_s = sc->cur_speed;
 
@@ -242,10 +243,10 @@
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
-		hfpll_disable(sc, false);
+		hfpll_disable(sc, skip_regulators);
 	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
 		hfpll_set_rate(sc, tgt_s);
-		hfpll_enable(sc, false);
+		hfpll_enable(sc, skip_regulators);
 		set_pri_clk_src(sc, tgt_s->pri_src_sel);
 	}
 
@@ -436,6 +437,47 @@
 	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
 }
 
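+/*
+ * The L2 HFPLL supply regulators are shared by all CPUs. l2_vreg_count
+ * tracks the outstanding votes so that the supplies are enabled on the
+ * first vote and disabled only when the last vote is removed.
+ */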
+static DEFINE_MUTEX(l2_regulator_lock);
+static int l2_vreg_count;
+
+static int enable_l2_regulators(void)
+{
+	int ret = 0;
+
+	mutex_lock(&l2_regulator_lock);
+	if (l2_vreg_count == 0) {
+		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
+		if (ret)
+			goto out;
+		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
+		if (ret) {
+			disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
+			goto out;
+		}
+	}
+	l2_vreg_count++;
+out:
+	mutex_unlock(&l2_regulator_lock);
+
+	return ret;
+}
+
+static void disable_l2_regulators(void)
+{
+	mutex_lock(&l2_regulator_lock);
+
+	if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
+		goto out;
+
+	if (l2_vreg_count == 1) {
+		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
+		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
+	}
+	l2_vreg_count--;
+out:
+	mutex_unlock(&l2_regulator_lock);
+}
+
 /* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
 static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
 				  enum setrate_reason reason)
@@ -443,8 +485,9 @@
 	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
 	const struct acpu_level *tgt;
 	int tgt_l2_l;
+	enum src_id prev_l2_src = NUM_SRC_ID;
 	struct vdd_data vdd_data;
-	unsigned long flags;
+	bool skip_regulators;
 	int rc = 0;
 
 	if (cpu > num_possible_cpus())
@@ -488,13 +531,31 @@
 		rc = increase_vdd(cpu, &vdd_data, reason);
 		if (rc)
 			goto out;
+
+		prev_l2_src =
+			drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
+		/* Vote for the L2 regulators here if necessary. */
+		if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
+			rc = enable_l2_regulators();
+			if (rc)
+				goto out;
+		}
 	}
 
 	dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
 		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
 
+	/*
+	 * If we are setting the rate as part of power collapse or in the resume
+	 * path after power collapse, skip the vote for the HFPLL regulators,
+	 * which are active-set-only votes that will be removed when the apps
+	 * processor enters its sleep set. This is needed to avoid voting for
+	 * regulators with sleeping APIs from an atomic context.
+	 */
+	skip_regulators = (reason == SETRATE_PC);
+
 	/* Set the new CPU speed. */
-	set_speed(&drv.scalable[cpu], tgt_acpu_s);
+	set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators);
 
 	/*
 	 * Update the L2 vote and apply the rate change. A spinlock is
@@ -503,15 +564,23 @@
 	 * called from an atomic context and the driver_lock mutex is not
 	 * acquired.
 	 */
-	spin_lock_irqsave(&l2_lock, flags);
+	spin_lock(&l2_lock);
 	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
-	set_speed(&drv.scalable[L2], &drv.l2_freq_tbl[tgt_l2_l].speed);
-	spin_unlock_irqrestore(&l2_lock, flags);
+	set_speed(&drv.scalable[L2],
+			&drv.l2_freq_tbl[tgt_l2_l].speed, true);
+	spin_unlock(&l2_lock);
 
 	/* Nothing else to do for power collapse or SWFI. */
 	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
 		goto out;
 
+	/*
+	 * Remove the vote for the L2 HFPLL regulators only if the L2
+	 * was already on an HFPLL source.
+	 */
+	if (prev_l2_src == HFPLL)
+		disable_l2_regulators();
+
 	/* Update bus bandwidth request. */
 	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);
 
@@ -673,6 +742,14 @@
 		goto err_core_conf;
 	}
 
+	/*
+	 * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
+	 * requires a corresponding target L2 frequency that needs the L2 to
+	 * run off of an HFPLL.
+	 */
+	if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
+		l2_vreg_count++;
+
 	return 0;
 
 err_core_conf:
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index 4b6834d..4db95b3 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -39,6 +39,7 @@
 	PLL_0 = 0,
 	HFPLL,
 	PLL_8,
+	NUM_SRC_ID,
 };
 
 /**
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index c0eb41c..6d2e42e 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3401,6 +3401,54 @@
 };
 
 #ifdef CONFIG_MSM_GEMINI
+
+static struct msm_bus_vectors gemini_init_vector[] = {
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_MM_IMEM,
+		.ab  = 0,
+		.ib  = 0,
+	},
+};
+
+static struct msm_bus_vectors gemini_encode_vector[] = {
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 540000000,
+		.ib  = 1350000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_JPEG_ENC,
+		.dst = MSM_BUS_SLAVE_MM_IMEM,
+		.ab  = 43200000,
+		.ib  = 69120000,
+	},
+};
+
+static struct msm_bus_paths gemini_bus_path[] = {
+	{
+		ARRAY_SIZE(gemini_init_vector),
+		gemini_init_vector,
+	},
+	{
+		ARRAY_SIZE(gemini_encode_vector),
+		gemini_encode_vector,
+	},
+};
+
+static struct msm_bus_scale_pdata gemini_bus_scale_pdata = {
+	.usecase = gemini_bus_path,
+	.num_usecases = ARRAY_SIZE(gemini_bus_path),
+	.name = "msm_gemini",
+};
+
 static struct resource msm_gemini_resources[] = {
 	{
 		.start  = 0x04600000,
@@ -3418,6 +3466,9 @@
 	.name           = "msm_gemini",
 	.resource       = msm_gemini_resources,
 	.num_resources  = ARRAY_SIZE(msm_gemini_resources),
+	.dev = {
+		.platform_data = &gemini_bus_scale_pdata,
+	},
 };
 #endif
 
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator.h b/arch/arm/mach-msm/include/mach/rpm-regulator.h
index 075d20f..b063b97 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator.h
@@ -101,8 +101,7 @@
  * @init_data:		regulator constraints
  * @id:			regulator id; from enum rpm_vreg_id
 * @sleep_selectable:	flag which indicates that regulator should be accessible
- *			by external private API and that spinlocks should be
- *			used instead of mutex locks
+ *			by external private API
  * @system_uA:		current drawn from regulator not accounted for by any
  *			regulator framework consumer
  * @enable_time:	time in us taken to enable a regulator to the maximum
@@ -184,10 +183,8 @@
  * Returns 0 on success or errno.
  *
  * This function is used to vote for the voltage of a regulator without
- * using the regulator framework.  It is needed by consumers which hold spin
- * locks or have interrupts disabled because the regulator framework can sleep.
- * It is also needed by consumers which wish to only vote for active set
- * regulator voltage.
+ * using the regulator framework.  It is needed by consumers which wish to
+ * vote only for the active set regulator voltage.
  *
  * If sleep_also == 0, then a sleep-set value of 0V will be voted for.
  *
diff --git a/arch/arm/mach-msm/rpm-regulator.c b/arch/arm/mach-msm/rpm-regulator.c
index 01543a2..4e5281d 100644
--- a/arch/arm/mach-msm/rpm-regulator.c
+++ b/arch/arm/mach-msm/rpm-regulator.c
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/wakelock.h>
@@ -299,12 +298,9 @@
 }
 
 static bool requires_tcxo_workaround;
-static bool tcxo_workaround_noirq;
 static struct clk *tcxo_handle;
 static struct wake_lock tcxo_wake_lock;
 static DEFINE_MUTEX(tcxo_mutex);
-/* Spin lock needed for sleep-selectable regulators. */
-static DEFINE_SPINLOCK(tcxo_noirq_lock);
 static bool tcxo_is_enabled;
 /*
  * TCXO must be kept on for at least the duration of its warmup (4 ms);
@@ -314,19 +310,10 @@
 
 static void tcxo_get_handle(void)
 {
-	int rc;
-
 	if (!tcxo_handle) {
 		tcxo_handle = clk_get_sys("rpm-regulator", "vref_buff");
-		if (IS_ERR(tcxo_handle)) {
+		if (IS_ERR(tcxo_handle))
 			tcxo_handle = NULL;
-		} else {
-			rc = clk_prepare(tcxo_handle);
-			if (rc) {
-				clk_put(tcxo_handle);
-				tcxo_handle = NULL;
-			}
-		}
 	}
 }
 
@@ -342,7 +329,7 @@
 	int rc;
 
 	if (tcxo_handle && !tcxo_is_enabled) {
-		rc = clk_enable(tcxo_handle);
+		rc = clk_prepare_enable(tcxo_handle);
 		if (!rc) {
 			tcxo_is_enabled = true;
 			wake_lock(&tcxo_wake_lock);
@@ -355,21 +342,13 @@
 
 static void tcxo_delayed_disable_work(struct work_struct *work)
 {
-	unsigned long flags = 0;
+	mutex_lock(&tcxo_mutex);
 
-	if (tcxo_workaround_noirq)
-		spin_lock_irqsave(&tcxo_noirq_lock, flags);
-	else
-		mutex_lock(&tcxo_mutex);
-
-	clk_disable(tcxo_handle);
+	clk_disable_unprepare(tcxo_handle);
 	tcxo_is_enabled = false;
 	wake_unlock(&tcxo_wake_lock);
 
-	if (tcxo_workaround_noirq)
-		spin_unlock_irqrestore(&tcxo_noirq_lock, flags);
-	else
-		mutex_unlock(&tcxo_mutex);
+	mutex_unlock(&tcxo_mutex);
 }
 
 static DECLARE_DELAYED_WORK(tcxo_disable_work, tcxo_delayed_disable_work);
@@ -387,8 +366,8 @@
 				msecs_to_jiffies(TCXO_WARMUP_TIME_MS) + 1);
 }
 
-/* Spin lock needed for sleep-selectable regulators. */
-static DEFINE_SPINLOCK(rpm_noirq_lock);
+/* Mutex lock needed for sleep-selectable regulators. */
+static DEFINE_MUTEX(rpm_sleep_sel_lock);
 
 static int voltage_from_req(struct vreg *vreg)
 {
@@ -421,7 +400,6 @@
 {
 	struct msm_rpm_iv_pair *prev_req;
 	int rc = 0, max_uV_vote = 0;
-	unsigned long flags = 0;
 	bool tcxo_enabled = false;
 	bool voltage_increased = false;
 	unsigned prev0, prev1;
@@ -470,17 +448,19 @@
 		if (requires_tcxo_workaround && vreg->requires_cxo
 		    && (set == MSM_RPM_CTX_SET_0)
 		    && (GET_PART(vreg, uV) > GET_PART_PREV_ACT(vreg, uV))) {
+			mutex_lock(&tcxo_mutex);
+			if (!tcxo_handle)
+				tcxo_get_handle();
 			voltage_increased = true;
-			spin_lock_irqsave(&tcxo_noirq_lock, flags);
 			tcxo_enabled = tcxo_enable();
 		}
 
-		rc = msm_rpmrs_set_noirq(set, vreg->req, cnt);
+		rc = msm_rpmrs_set(set, vreg->req, cnt);
 		if (rc) {
 			vreg->req[0].value = prev0;
 			vreg->req[1].value = prev1;
 
-			vreg_err(vreg, "msm_rpmrs_set_noirq failed - "
+			vreg_err(vreg, "msm_rpmrs_set failed - "
 				"set=%s, id=%d, rc=%d\n",
 				(set == MSM_RPM_CTX_SET_0 ? "active" : "sleep"),
 				vreg->req[0].id, rc);
@@ -502,7 +482,7 @@
 		if (voltage_increased) {
 			if (tcxo_enabled)
 				tcxo_delayed_disable();
-			spin_unlock_irqrestore(&tcxo_noirq_lock, flags);
+			mutex_unlock(&tcxo_mutex);
 		}
 	} else if (msm_rpm_vreg_debug_mask & MSM_RPM_VREG_DEBUG_DUPLICATE) {
 		rpm_regulator_duplicate(vreg, set, cnt);
@@ -511,19 +491,18 @@
 	return rc;
 }
 
-static int vreg_set_noirq(struct vreg *vreg, enum rpm_vreg_voter voter,
+static int vreg_set_sleep_sel(struct vreg *vreg, enum rpm_vreg_voter voter,
 			  int sleep, unsigned mask0, unsigned val0,
 			  unsigned mask1, unsigned val1, unsigned cnt,
 			  int update_voltage)
 {
 	unsigned int s_mask[2] = {mask0, mask1}, s_val[2] = {val0, val1};
-	unsigned long flags;
 	int rc;
 
 	if (voter < 0 || voter >= RPM_VREG_VOTER_COUNT)
 		return -EINVAL;
 
-	spin_lock_irqsave(&rpm_noirq_lock, flags);
+	mutex_lock(&rpm_sleep_sel_lock);
 
 	/*
 	 * Send sleep set request first so that subsequent set_mode, etc calls
@@ -559,7 +538,7 @@
 	rc = vreg_send_request(vreg, voter, MSM_RPM_CTX_SET_0, mask0, val0,
 					mask1, val1, cnt, update_voltage);
 
-	spin_unlock_irqrestore(&rpm_noirq_lock, flags);
+	mutex_unlock(&rpm_sleep_sel_lock);
 
 	return rc;
 }
@@ -575,10 +554,8 @@
  * Returns 0 on success or errno.
  *
  * This function is used to vote for the voltage of a regulator without
- * using the regulator framework.  It is needed by consumers which hold spin
- * locks or have interrupts disabled because the regulator framework can sleep.
- * It is also needed by consumers which wish to only vote for active set
- * regulator voltage.
+ * using the regulator framework.  It is needed by consumers which wish to
+ * vote only for the active set regulator voltage.
  *
  * If sleep_also == 0, then a sleep-set value of 0V will be voted for.
  *
@@ -693,10 +670,10 @@
 		    = vreg->part->enable_state.mask;
 	}
 
-	rc = vreg_set_noirq(vreg, voter, sleep_also, mask[0], val[0], mask[1],
-			    val[1], vreg->part->request_len, 1);
+	rc = vreg_set_sleep_sel(vreg, voter, sleep_also, mask[0], val[0],
+				mask[1], val[1], vreg->part->request_len, 1);
 	if (rc)
-		vreg_err(vreg, "vreg_set_noirq failed, rc=%d\n", rc);
+		vreg_err(vreg, "vreg_set_sleep_sel failed, rc=%d\n", rc);
 
 	return rc;
 }
@@ -743,10 +720,10 @@
 	val[vreg->part->freq.word] = freq << vreg->part->freq.shift;
 	mask[vreg->part->freq.word] = vreg->part->freq.mask;
 
-	rc = vreg_set_noirq(vreg, RPM_VREG_VOTER_REG_FRAMEWORK, 1, mask[0],
+	rc = vreg_set_sleep_sel(vreg, RPM_VREG_VOTER_REG_FRAMEWORK, 1, mask[0],
 			   val[0], mask[1], val[1], vreg->part->request_len, 0);
 	if (rc)
-		vreg_err(vreg, "vreg_set failed, rc=%d\n", rc);
+		vreg_err(vreg, "vreg_set_sleep_sel failed, rc=%d\n", rc);
 
 	return rc;
 }
@@ -1018,10 +995,8 @@
 static int vreg_store(struct vreg *vreg, unsigned mask0, unsigned val0,
 		unsigned mask1, unsigned val1)
 {
-	unsigned long flags = 0;
-
 	if (vreg->pdata.sleep_selectable)
-		spin_lock_irqsave(&rpm_noirq_lock, flags);
+		mutex_lock(&rpm_sleep_sel_lock);
 
 	vreg->req[0].value &= ~mask0;
 	vreg->req[0].value |= val0 & mask0;
@@ -1030,7 +1005,7 @@
 	vreg->req[1].value |= val1 & mask1;
 
 	if (vreg->pdata.sleep_selectable)
-		spin_unlock_irqrestore(&rpm_noirq_lock, flags);
+		mutex_unlock(&rpm_sleep_sel_lock);
 
 	return 0;
 }
@@ -1039,7 +1014,6 @@
 		unsigned mask1, unsigned val1, unsigned cnt)
 {
 	unsigned prev0 = 0, prev1 = 0;
-	unsigned long flags = 0;
 	bool tcxo_enabled = false;
 	bool voltage_increased = false;
 	int rc;
@@ -1049,7 +1023,7 @@
 	 * just the active set values.
 	 */
 	if (vreg->pdata.sleep_selectable)
-		return vreg_set_noirq(vreg, RPM_VREG_VOTER_REG_FRAMEWORK, 1,
+		return vreg_set_sleep_sel(vreg, RPM_VREG_VOTER_REG_FRAMEWORK, 1,
 					mask0, val0, mask1, val1, cnt, 1);
 
 	prev0 = vreg->req[0].value;
@@ -1071,21 +1045,14 @@
 	/* Enable CXO clock if necessary for TCXO workaround. */
 	if (requires_tcxo_workaround && vreg->requires_cxo
 	    && (GET_PART(vreg, uV) > GET_PART_PREV_ACT(vreg, uV))) {
+		mutex_lock(&tcxo_mutex);
 		if (!tcxo_handle)
 			tcxo_get_handle();
-		if (tcxo_workaround_noirq)
-			spin_lock_irqsave(&tcxo_noirq_lock, flags);
-		else
-			mutex_lock(&tcxo_mutex);
-
 		voltage_increased = true;
 		tcxo_enabled = tcxo_enable();
 	}
 
-	if (voltage_increased && tcxo_workaround_noirq)
-		rc = msm_rpmrs_set_noirq(MSM_RPM_CTX_SET_0, vreg->req, cnt);
-	else
-		rc = msm_rpm_set(MSM_RPM_CTX_SET_0, vreg->req, cnt);
+	rc = msm_rpm_set(MSM_RPM_CTX_SET_0, vreg->req, cnt);
 
 	if (rc) {
 		vreg->req[0].value = prev0;
@@ -1107,11 +1074,7 @@
 	if (voltage_increased) {
 		if (tcxo_enabled)
 			tcxo_delayed_disable();
-
-		if (tcxo_workaround_noirq)
-			spin_unlock_irqrestore(&tcxo_noirq_lock, flags);
-		else
-			mutex_unlock(&tcxo_mutex);
+		mutex_unlock(&tcxo_mutex);
 	}
 
 	return rc;
@@ -1794,7 +1757,6 @@
 	struct rpm_regulator_platform_data *platform_data;
 	static struct rpm_regulator_consumer_mapping *prev_consumer_map;
 	static int prev_consumer_map_len;
-	struct vreg *vreg;
 	int rc = 0;
 	int i, id;
 
@@ -1880,18 +1842,6 @@
 				"rpm_regulator_tcxo");
 	}
 
-	if (requires_tcxo_workaround && !tcxo_workaround_noirq) {
-		for (i = 0; i < platform_data->num_regulators; i++) {
-			vreg = rpm_vreg_get_vreg(
-					platform_data->init_data[i].id);
-			if (vreg && vreg->requires_cxo
-			    && platform_data->init_data[i].sleep_selectable) {
-				tcxo_workaround_noirq = true;
-				break;
-			}
-		}
-	}
-
 	/* Initialize all of the regulators listed in the platform data. */
 	for (i = 0; i < platform_data->num_regulators; i++) {
 		rc = rpm_vreg_init_regulator(&platform_data->init_data[i],
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 785ba6c..6bab8db 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -92,6 +92,8 @@
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
 	unsigned int rate_mult;
+	unsigned int prev_load;
+	unsigned int max_load;
 	int cpu;
 	unsigned int sample_type:1;
 	/*
@@ -120,17 +122,27 @@
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int up_threshold;
+	unsigned int up_threshold_multi_core;
 	unsigned int down_differential;
+	unsigned int down_differential_multi_core;
+	unsigned int optimal_freq;
+	unsigned int up_threshold_any_cpu_load;
+	unsigned int sync_freq;
 	unsigned int ignore_nice;
 	unsigned int sampling_down_factor;
 	int          powersave_bias;
 	unsigned int io_is_busy;
 } dbs_tuners_ins = {
+	.up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD,
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+	.down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL,
+	.up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD,
 	.ignore_nice = 0,
 	.powersave_bias = 0,
+	.sync_freq = 0,
+	.optimal_freq = 0,
 };
 
 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
@@ -288,9 +300,13 @@
 show_one(sampling_rate, sampling_rate);
 show_one(io_is_busy, io_is_busy);
 show_one(up_threshold, up_threshold);
+show_one(up_threshold_multi_core, up_threshold_multi_core);
 show_one(down_differential, down_differential);
 show_one(sampling_down_factor, sampling_down_factor);
 show_one(ignore_nice_load, ignore_nice);
+show_one(optimal_freq, optimal_freq);
+show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load);
+show_one(sync_freq, sync_freq);
 
 static ssize_t show_powersave_bias
 (struct kobject *kobj, struct attribute *attr, char *buf)
@@ -366,6 +382,19 @@
 	return count;
 }
 
+static ssize_t store_sync_freq(struct kobject *a, struct attribute *b,
+				   const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
+		return -EINVAL;
+	dbs_tuners_ins.sync_freq = input;
+	return count;
+}
+
 static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
 				   const char *buf, size_t count)
 {
@@ -379,6 +408,19 @@
 	return count;
 }
 
+static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b,
+				   const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
+		return -EINVAL;
+	dbs_tuners_ins.optimal_freq = input;
+	return count;
+}
+
 static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 				  const char *buf, size_t count)
 {
@@ -394,6 +436,36 @@
 	return count;
 }
 
+static ssize_t store_up_threshold_multi_core(struct kobject *a,
+			struct attribute *b, const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+			input < MIN_FREQUENCY_UP_THRESHOLD) {
+		return -EINVAL;
+	}
+	dbs_tuners_ins.up_threshold_multi_core = input;
+	return count;
+}
+
+static ssize_t store_up_threshold_any_cpu_load(struct kobject *a,
+			struct attribute *b, const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+			input < MIN_FREQUENCY_UP_THRESHOLD) {
+		return -EINVAL;
+	}
+	dbs_tuners_ins.up_threshold_any_cpu_load = input;
+	return count;
+}
+
 static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
 		const char *buf, size_t count)
 {
@@ -576,6 +648,10 @@
 define_one_global_rw(sampling_down_factor);
 define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
+define_one_global_rw(up_threshold_multi_core);
+define_one_global_rw(optimal_freq);
+define_one_global_rw(up_threshold_any_cpu_load);
+define_one_global_rw(sync_freq);
 
 static struct attribute *dbs_attributes[] = {
 	&sampling_rate_min.attr,
@@ -586,6 +662,10 @@
 	&ignore_nice_load.attr,
 	&powersave_bias.attr,
 	&io_is_busy.attr,
+	&up_threshold_multi_core.attr,
+	&optimal_freq.attr,
+	&up_threshold_any_cpu_load.attr,
+	&sync_freq.attr,
 	NULL
 };
 
@@ -614,7 +694,7 @@
 	unsigned int max_load_freq;
 	/* Current load across this CPU */
 	unsigned int cur_load = 0;
-
+	unsigned int max_load_other_cpu = 0;
 	struct cpufreq_policy *policy;
 	unsigned int j;
 
@@ -691,7 +771,8 @@
 			continue;
 
 		cur_load = 100 * (wall_time - idle_time) / wall_time;
-
+		j_dbs_info->max_load  = max(cur_load, j_dbs_info->prev_load);
+		j_dbs_info->prev_load = cur_load;
 		freq_avg = __cpufreq_driver_getavg(policy, j);
 		if (freq_avg <= 0)
 			freq_avg = policy->cur;
@@ -700,11 +781,37 @@
 		if (load_freq > max_load_freq)
 			max_load_freq = load_freq;
 	}
+
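+	/*
+	 * Track the highest recent load seen on any other online CPU so
+	 * that this CPU's frequency can be raised in step with it.
+	 */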
+	for_each_online_cpu(j) {
+		struct cpu_dbs_info_s *j_dbs_info;
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+
+		if (j == policy->cpu)
+			continue;
+
+		if (max_load_other_cpu < j_dbs_info->max_load)
+			max_load_other_cpu = j_dbs_info->max_load;
+		/*
+		 * The other CPU could be running at a higher frequency
+		 * but may not have completed its sampling_down_factor.
+		 * In that case, consider the other CPU loaded so that a
+		 * frequency imbalance does not occur.
+		 */
+		if ((j_dbs_info->cur_policy != NULL)
+			&& (j_dbs_info->cur_policy->cur ==
+					j_dbs_info->cur_policy->max)) {
+
+			if (policy->cur >= dbs_tuners_ins.optimal_freq)
+				max_load_other_cpu =
+				dbs_tuners_ins.up_threshold_any_cpu_load;
+		}
+	}
+
 	/* calculate the scaled load across CPU */
 	load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
 
 	cpufreq_notify_utilization(policy, load_at_max_freq);
-
 	/* Check for frequency increase */
 	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
 		/* If switching to max speed, apply sampling_down_factor */
@@ -715,6 +822,25 @@
 		return;
 	}
 
+	if (num_online_cpus() > 1) {
+
+		if (max_load_other_cpu >
+				dbs_tuners_ins.up_threshold_any_cpu_load) {
+			if (policy->cur < dbs_tuners_ins.sync_freq)
+				dbs_freq_increase(policy,
+						dbs_tuners_ins.sync_freq);
+			return;
+		}
+
+		if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core *
+								policy->cur) {
+			if (policy->cur < dbs_tuners_ins.optimal_freq)
+				dbs_freq_increase(policy,
+						dbs_tuners_ins.optimal_freq);
+			return;
+		}
+	}
+
 	/* Check for frequency decrease */
 	/* if we cannot reduce the frequency anymore, break out early */
 	if (policy->cur == policy->min)
@@ -739,6 +865,20 @@
 		if (freq_next < policy->min)
 			freq_next = policy->min;
 
+		if (num_online_cpus() > 1) {
+			if (max_load_other_cpu >
+			    (dbs_tuners_ins.up_threshold_multi_core -
+			     dbs_tuners_ins.down_differential) &&
+			    freq_next < dbs_tuners_ins.sync_freq)
+				freq_next = dbs_tuners_ins.sync_freq;
+
+			if (max_load_freq >
+			    (dbs_tuners_ins.up_threshold_multi_core -
+			     dbs_tuners_ins.down_differential_multi_core) *
+			    policy->cur)
+				freq_next = dbs_tuners_ins.optimal_freq;
+		}
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
@@ -989,6 +1129,12 @@
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
 			dbs_tuners_ins.io_is_busy = should_io_be_busy();
+
+			if (dbs_tuners_ins.optimal_freq == 0)
+				dbs_tuners_ins.optimal_freq = policy->min;
+
+			if (dbs_tuners_ins.sync_freq == 0)
+				dbs_tuners_ins.sync_freq = policy->min;
 		}
 		if (!cpu)
 			rc = input_register_handler(&dbs_input_handler);
diff --git a/drivers/media/video/msm/gemini/msm_gemini_core.c b/drivers/media/video/msm/gemini/msm_gemini_core.c
index 45d3937..2a5495e 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_core.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_core.c
@@ -142,7 +142,7 @@
 	buf_p = msm_gemini_hw_pingpong_active_buffer(&we_pingpong_buf);
 	if (buf_p) {
 		buf_p->framedone_len = msm_gemini_hw_encode_output_size();
-		GMN_DBG("%s:%d] framedone_len %d\n", __func__, __LINE__,
+		pr_err("%s:%d] framedone_len %d\n", __func__, __LINE__,
 			buf_p->framedone_len);
 	}
 
diff --git a/drivers/media/video/msm/gemini/msm_gemini_hw.c b/drivers/media/video/msm/gemini/msm_gemini_hw.c
index ba8f353..241cbbd 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_hw.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_hw.c
@@ -297,6 +297,7 @@
 
 	struct msm_gemini_hw_cmd *hw_cmd_p;
 
+	GMN_DBG("%s:%d] pingpong index %d", __func__, __LINE__, pingpong_index);
 	if (pingpong_index == 0) {
 		hw_cmd_p = &hw_cmd_we_ping_update[0];
 
@@ -523,3 +524,31 @@
 	}
 }
 
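+/* Dump 'size' bytes of the Gemini register region, four words per line. */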
+void msm_gemini_io_dump(int size)
+{
+	char line_str[128], *p_str;
+	void __iomem *addr = gemini_region_base;
+	int i;
+	u32 *p = (u32 *) addr;
+	u32 data;
+	pr_err("%s: %p %d reg_size %d\n", __func__, addr, size,
+							gemini_region_size);
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size/4; i++) {
+		if (i % 4 == 0) {
+			snprintf(p_str, 12, "%08x: ", (u32) p);
+			p_str += 10;
+		}
+		data = readl_relaxed(p++);
+		snprintf(p_str, 12, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % 4 == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+}
diff --git a/drivers/media/video/msm/gemini/msm_gemini_hw.h b/drivers/media/video/msm/gemini/msm_gemini_hw.h
index 248ba06..c147f11 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_hw.h
+++ b/drivers/media/video/msm/gemini/msm_gemini_hw.h
@@ -96,6 +96,7 @@
 void msm_gemini_hw_delay(struct msm_gemini_hw_cmd *hw_cmd_p, int m_us);
 int msm_gemini_hw_exec_cmds(struct msm_gemini_hw_cmd *hw_cmd_p, int m_cmds);
 void msm_gemini_hw_region_dump(int size);
+void msm_gemini_io_dump(int size);
 
 #define MSM_GEMINI_PIPELINE_CLK_128MHZ 128 /* 8MP  128MHz */
 #define MSM_GEMINI_PIPELINE_CLK_140MHZ 140 /* 9MP  140MHz */
diff --git a/drivers/media/video/msm/gemini/msm_gemini_hw_reg.h b/drivers/media/video/msm/gemini/msm_gemini_hw_reg.h
index 4bddfbb..bd992d0 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_hw_reg.h
+++ b/drivers/media/video/msm/gemini/msm_gemini_hw_reg.h
@@ -171,6 +171,6 @@
 #define HWIO_JPEG_IRQ_STATUS_RMSK 0xffffffff
 
 #define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_ADDR (GEMINI_REG_BASE + 0x00000034)
-#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_RMSK 0xffffff
+#define HWIO_JPEG_STATUS_ENCODE_OUTPUT_SIZE_RMSK 0xffffffff
 
 #endif /* MSM_GEMINI_HW_REG_H */
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.c b/drivers/media/video/msm/gemini/msm_gemini_sync.c
index 97bb611..b89a468 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.c
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.c
@@ -20,9 +20,14 @@
 #include "msm_gemini_core.h"
 #include "msm_gemini_platform.h"
 #include "msm_gemini_common.h"
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
 
 static int release_buf;
 
+/* Maximum size of one output fragment; a multiple of the 4k page size. */
+static const int g_max_out_size = 0x7ff000;
+
 /*************** queue helper ****************/
 inline void msm_gemini_q_init(char const *name, struct msm_gemini_q *q_p)
 {
@@ -180,7 +185,7 @@
 {
 	int rc = 0;
 
-	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	GMN_PR_ERR("%s:%d] buf_in %p", __func__, __LINE__, buf_in);
 
 	if (buf_in) {
 		buf_in->vbuf.framedone_len = buf_in->framedone_len;
@@ -266,13 +271,82 @@
 
 /*************** output queue ****************/
 
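+/*
+ * Carve the next ping/pong fragment out of the single user-supplied
+ * output buffer. Each fragment is at most max_out_size bytes; returns
+ * -EINVAL once the entire buffer has been handed to the hardware.
+ */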
+int msm_gemini_get_out_buffer(struct msm_gemini_device *pgmn_dev,
+	struct msm_gemini_hw_buf *p_outbuf)
+{
+	int buf_size = 0;
+	int bytes_remaining = 0;
+	if (pgmn_dev->out_offset >= pgmn_dev->out_buf.y_len) {
+		GMN_PR_ERR("%s:%d] no more buffers", __func__, __LINE__);
+		return -EINVAL;
+	}
+	bytes_remaining = pgmn_dev->out_buf.y_len - pgmn_dev->out_offset;
+	buf_size = min(bytes_remaining, pgmn_dev->max_out_size);
+
+	pgmn_dev->out_frag_cnt++;
+	pr_err("%s:%d] buf_size[%d] %d", __func__, __LINE__,
+		pgmn_dev->out_frag_cnt, buf_size);
+	p_outbuf->y_len = buf_size;
+	p_outbuf->y_buffer_addr = pgmn_dev->out_buf.y_buffer_addr +
+		pgmn_dev->out_offset;
+	pgmn_dev->out_offset += buf_size;
+	return 0;
+}
+
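+/*
+ * Write-engine ping/pong IRQ handling for single output mode: on frame
+ * done, return the whole output buffer to userspace; otherwise program
+ * the next fragment of the same buffer into the write engine.
+ */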
+int msm_gemini_outmode_single_we_pingpong_irq(
+	struct msm_gemini_device *pgmn_dev,
+	struct msm_gemini_core_buf *buf_in)
+{
+	int rc = 0;
+	struct msm_gemini_core_buf out_buf;
+	int frame_done = buf_in &&
+		buf_in->vbuf.type == MSM_GEMINI_EVT_FRAMEDONE;
+	pr_err("%s:%d] framedone %d", __func__, __LINE__, frame_done);
+	if (!pgmn_dev->out_buf_set) {
+		GMN_PR_ERR("%s:%d] output buffer not set",
+			__func__, __LINE__);
+		return -EFAULT;
+	}
+	if (frame_done) {
+		/* send the buffer back */
+		pgmn_dev->out_buf.vbuf.framedone_len = buf_in->framedone_len;
+		pgmn_dev->out_buf.vbuf.type = MSM_GEMINI_EVT_FRAMEDONE;
+		rc = msm_gemini_q_in_buf(&pgmn_dev->output_rtn_q,
+			&pgmn_dev->out_buf);
+		if (rc) {
+			GMN_PR_ERR("%s:%d] cannot queue the output buffer",
+				 __func__, __LINE__);
+			return -EFAULT;
+		}
+		rc = msm_gemini_q_wakeup(&pgmn_dev->output_rtn_q);
+		/*
+		 * Reset the output buffer since its ownership is
+		 * transferred to the rtn queue.
+		 */
+		if (!rc)
+			pgmn_dev->out_buf_set = 0;
+	} else {
+		/* configure ping/pong */
+		rc = msm_gemini_get_out_buffer(pgmn_dev, &out_buf);
+		if (rc)
+			msm_gemini_core_we_buf_reset(&out_buf);
+		else
+			msm_gemini_core_we_buf_update(&out_buf);
+	}
+	return rc;
+}
+
 int msm_gemini_we_pingpong_irq(struct msm_gemini_device *pgmn_dev,
 	struct msm_gemini_core_buf *buf_in)
 {
 	int rc = 0;
 	struct msm_gemini_core_buf *buf_out;
 
-	GMN_DBG("%s:%d] Enter\n", __func__, __LINE__);
+	GMN_DBG("%s:%d] Enter mode %d", __func__, __LINE__,
+		pgmn_dev->out_mode);
+
+	if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_SINGLE)
+		return msm_gemini_outmode_single_we_pingpong_irq(pgmn_dev,
+			buf_in);
+
 	if (buf_in) {
 		GMN_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
 			(int) buf_in->y_buffer_addr, buf_in->y_len);
@@ -339,6 +413,43 @@
 	return 0;
 }
 
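+/*
+ * Map the single user-supplied output buffer for single output mode and
+ * hold on to it until the encoded frame is returned.
+ */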
+int msm_gemini_set_output_buf(struct msm_gemini_device *pgmn_dev,
+	void __user *arg)
+{
+	struct msm_gemini_buf buf_cmd;
+
+	if (pgmn_dev->out_buf_set) {
+		GMN_PR_ERR("%s:%d] output buffer already provided",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_gemini_buf))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	GMN_DBG("%s:%d] output addr 0x%08x len %d", __func__, __LINE__,
+		(int) buf_cmd.vaddr,
+		buf_cmd.y_len);
+
+	pgmn_dev->out_buf.y_buffer_addr = msm_gemini_platform_v2p(
+		buf_cmd.fd,
+		buf_cmd.y_len,
+		&pgmn_dev->out_buf.file,
+		&pgmn_dev->out_buf.handle);
+	if (!pgmn_dev->out_buf.y_buffer_addr) {
+		GMN_PR_ERR("%s:%d] cannot map the output address",
+			__func__, __LINE__);
+		return -EFAULT;
+	}
+	pgmn_dev->out_buf.y_len = buf_cmd.y_len;
+	pgmn_dev->out_buf.vbuf = buf_cmd;
+	pgmn_dev->out_buf_set = 1;
+
+	return 0;
+}
+
 int msm_gemini_output_buf_enqueue(struct msm_gemini_device *pgmn_dev,
 	void __user *arg)
 {
@@ -456,6 +567,7 @@
 	struct msm_gemini_core_buf *buf_p;
 	struct msm_gemini_buf buf_cmd;
 	int rc = 0;
+	struct msm_bus_scale_pdata *p_bus_scale_data = NULL;
 
 	if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_gemini_buf))) {
 		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
@@ -484,9 +596,9 @@
 			return rc;
 		}
 	} else {
-	buf_p->y_buffer_addr    = msm_gemini_platform_v2p(buf_cmd.fd,
-		buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file,
-		&buf_p->handle)	+ buf_cmd.offset + buf_cmd.y_off;
+		buf_p->y_buffer_addr    = msm_gemini_platform_v2p(buf_cmd.fd,
+			buf_cmd.y_len + buf_cmd.cbcr_len, &buf_p->file,
+			&buf_p->handle)	+ buf_cmd.offset + buf_cmd.y_off;
 	}
 	buf_p->y_len          = buf_cmd.y_len;
 
@@ -504,6 +616,30 @@
 		return -1;
 	}
 	buf_p->vbuf           = buf_cmd;
+	buf_p->vbuf.type      = MSM_GEMINI_EVT_RESET;
+
+	/* Set bus vectors */
+	p_bus_scale_data = (struct msm_bus_scale_pdata *)
+		pgmn_dev->pdev->dev.platform_data;
+	if (pgmn_dev->bus_perf_client &&
+		(MSM_GMN_OUTMODE_SINGLE == pgmn_dev->out_mode)) {
+		int rc;
+		struct msm_bus_paths *path = &(p_bus_scale_data->usecase[1]);
+		GMN_DBG("%s:%d] Update bus bandwidth", __func__, __LINE__);
+		if (pgmn_dev->op_mode & MSM_GEMINI_MODE_OFFLINE_ENCODE) {
+			path->vectors[0].ab = (buf_p->y_len + buf_p->cbcr_len) *
+				15 * 2;
+			path->vectors[0].ib = path->vectors[0].ab;
+			path->vectors[1].ab = 0;
+			path->vectors[1].ib = 0;
+		}
+		rc = msm_bus_scale_client_update_request(
+			pgmn_dev->bus_perf_client, 1);
+		if (rc < 0) {
+			GMN_PR_ERR("%s:%d] update_request fails %d",
+				__func__, __LINE__, rc);
+		}
+	}
 
 	msm_gemini_q_in(&pgmn_dev->input_buf_q, buf_p);
 
@@ -545,6 +681,9 @@
 int __msm_gemini_open(struct msm_gemini_device *pgmn_dev)
 {
 	int rc;
+	struct msm_bus_scale_pdata *p_bus_scale_data =
+		(struct msm_bus_scale_pdata *)
+			pgmn_dev->pdev->dev.platform_data;
 
 	mutex_lock(&pgmn_dev->lock);
 	if (pgmn_dev->open_count) {
@@ -576,7 +715,23 @@
 	msm_gemini_q_cleanup(&pgmn_dev->input_rtn_q);
 	msm_gemini_q_cleanup(&pgmn_dev->input_buf_q);
 	msm_gemini_core_init();
+	pgmn_dev->out_mode = MSM_GMN_OUTMODE_FRAGMENTED;
+	pgmn_dev->out_buf_set = 0;
+	pgmn_dev->out_offset = 0;
+	pgmn_dev->max_out_size = g_max_out_size;
+	pgmn_dev->out_frag_cnt = 0;
+	pgmn_dev->bus_perf_client = 0;
 
+	if (p_bus_scale_data) {
+		GMN_DBG("%s:%d] register bus client", __func__, __LINE__);
+		pgmn_dev->bus_perf_client =
+			msm_bus_scale_register_client(p_bus_scale_data);
+		if (!pgmn_dev->bus_perf_client) {
+			GMN_PR_ERR("%s:%d] bus client register failed",
+				__func__, __LINE__);
+			return -EINVAL;
+		}
+	}
 	GMN_DBG("%s:%d] success\n", __func__, __LINE__);
 	return rc;
 }
@@ -593,13 +748,23 @@
 	pgmn_dev->open_count--;
 	mutex_unlock(&pgmn_dev->lock);
 
-	msm_gemini_core_release(release_buf);
+	if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_FRAGMENTED) {
+		msm_gemini_core_release(release_buf);
+	} else if (pgmn_dev->out_buf_set) {
+		msm_gemini_platform_p2v(pgmn_dev->out_buf.file,
+			&pgmn_dev->out_buf.handle);
+	}
 	msm_gemini_q_cleanup(&pgmn_dev->evt_q);
 	msm_gemini_q_cleanup(&pgmn_dev->output_rtn_q);
 	msm_gemini_outbuf_q_cleanup(&pgmn_dev->output_buf_q);
 	msm_gemini_q_cleanup(&pgmn_dev->input_rtn_q);
 	msm_gemini_outbuf_q_cleanup(&pgmn_dev->input_buf_q);
 
+	if (pgmn_dev->bus_perf_client) {
+		msm_bus_scale_unregister_client(pgmn_dev->bus_perf_client);
+		pgmn_dev->bus_perf_client = 0;
+	}
+
 	if (pgmn_dev->open_count)
 		GMN_PR_ERR(KERN_ERR "%s: multiple opens\n", __func__);
 
@@ -699,29 +864,59 @@
 		}
 	}
 
-	for (i = 0; i < 2; i++) {
-		buf_out_free[i] = msm_gemini_q_out(&pgmn_dev->output_buf_q);
+	if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_FRAGMENTED) {
+		for (i = 0; i < 2; i++) {
+			buf_out_free[i] =
+				msm_gemini_q_out(&pgmn_dev->output_buf_q);
 
-		if (buf_out_free[i]) {
-			msm_gemini_core_we_buf_update(buf_out_free[i]);
-		} else if (i == 1) {
-			/* set the pong to same address as ping */
-			buf_out_free[0]->y_len >>= 1;
-			buf_out_free[0]->y_buffer_addr +=
-				buf_out_free[0]->y_len;
-			msm_gemini_core_we_buf_update(buf_out_free[0]);
-			/* since ping and pong are same buf release only once*/
-			release_buf = 0;
-		} else {
-			GMN_DBG("%s:%d] no output buffer\n",
-			__func__, __LINE__);
-			break;
+			if (buf_out_free[i]) {
+				msm_gemini_core_we_buf_update(buf_out_free[i]);
+			} else if (i == 1) {
+				/* set the pong to same address as ping */
+				buf_out_free[0]->y_len >>= 1;
+				buf_out_free[0]->y_buffer_addr +=
+					buf_out_free[0]->y_len;
+				msm_gemini_core_we_buf_update(buf_out_free[0]);
+				/*
+				 * since ping and pong are the same buf,
+				 * release only once
+				 */
+				release_buf = 0;
+			} else {
+				GMN_DBG("%s:%d] no output buffer\n",
+					__func__, __LINE__);
+				break;
+			}
 		}
+		for (i = 0; i < 2; i++)
+			kfree(buf_out_free[i]);
+	} else {
+		struct msm_gemini_core_buf out_buf;
+		/*
+		 * Since the same buffer is fragmented, p2v need not be
+		 * called for all the buffers.
+		 */
+		release_buf = 0;
+		if (!pgmn_dev->out_buf_set) {
+			GMN_PR_ERR("%s:%d] output buffer not set",
+				__func__, __LINE__);
+			return -EFAULT;
+		}
+		/* configure ping */
+		rc = msm_gemini_get_out_buffer(pgmn_dev, &out_buf);
+		if (rc) {
+			GMN_PR_ERR("%s:%d] no output buffer for ping",
+				__func__, __LINE__);
+			return rc;
+		}
+		msm_gemini_core_we_buf_update(&out_buf);
+		/* configure pong */
+		rc = msm_gemini_get_out_buffer(pgmn_dev, &out_buf);
+		if (rc) {
+			GMN_DBG("%s:%d] no output buffer for pong",
+				__func__, __LINE__);
+			/* fall through to configure same buffer */
+		}
+		msm_gemini_core_we_buf_update(&out_buf);
 	}
 
-	for (i = 0; i < 2; i++)
-		kfree(buf_out_free[i]);
-
 	rc = msm_gemini_ioctl_hw_cmds(pgmn_dev, arg);
 	GMN_DBG("%s:%d]\n", __func__, __LINE__);
 	return rc;
@@ -746,6 +941,24 @@
 	return rc;
 }
 
+int msm_gemini_ioctl_set_outmode(struct msm_gemini_device *pgmn_dev,
+	void * __user arg)
+{
+	int rc = 0;
+	enum msm_gmn_out_mode mode;
+
+	if (copy_from_user(&mode, arg, sizeof(mode))) {
+		GMN_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+	GMN_DBG("%s:%d] mode %d", __func__, __LINE__, mode);
+
+	if ((mode == MSM_GMN_OUTMODE_FRAGMENTED) ||
+	    (mode == MSM_GMN_OUTMODE_SINGLE))
+		pgmn_dev->out_mode = mode;
+	return rc;
+}
+
 int msm_gemini_ioctl_test_dump_region(struct msm_gemini_device *pgmn_dev,
 	unsigned long arg)
 {
@@ -790,8 +1003,12 @@
 		break;
 
 	case MSM_GMN_IOCTL_OUTPUT_BUF_ENQUEUE:
-		rc = msm_gemini_output_buf_enqueue(pgmn_dev,
-			(void __user *) arg);
+		if (pgmn_dev->out_mode == MSM_GMN_OUTMODE_FRAGMENTED)
+			rc = msm_gemini_output_buf_enqueue(pgmn_dev,
+				(void __user *) arg);
+		else
+			rc = msm_gemini_set_output_buf(pgmn_dev,
+				(void __user *) arg);
 		break;
 
 	case MSM_GMN_IOCTL_OUTPUT_GET:
@@ -822,6 +1039,10 @@
 		rc = msm_gemini_ioctl_test_dump_region(pgmn_dev, arg);
 		break;
 
+	case MSM_GMN_IOCTL_SET_MODE:
+		rc = msm_gemini_ioctl_set_outmode(pgmn_dev, (void __user *)arg);
+		break;
+
 	default:
 		GMN_PR_ERR(KERN_INFO "%s:%d] cmd = %d not supported\n",
 			__func__, __LINE__, _IOC_NR(cmd));
diff --git a/drivers/media/video/msm/gemini/msm_gemini_sync.h b/drivers/media/video/msm/gemini/msm_gemini_sync.h
index 1c6726d..3f6f1bc 100644
--- a/drivers/media/video/msm/gemini/msm_gemini_sync.h
+++ b/drivers/media/video/msm/gemini/msm_gemini_sync.h
@@ -74,6 +74,16 @@
 	struct msm_gemini_q input_buf_q;
 
 	struct v4l2_subdev subdev;
+	enum msm_gmn_out_mode out_mode;
+
+	/* single output mode parameters */
+	struct msm_gemini_hw_buf out_buf;
+	int out_offset;
+	int out_buf_set;
+	int max_out_size;
+	int out_frag_cnt;
+
+	uint32_t bus_perf_client;
 };
 
 int __msm_gemini_open(struct msm_gemini_device *pgmn_dev);
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 0755a1c..247b721 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -2130,11 +2130,6 @@
 	if (soc > 100)
 		soc = 100;
 
-	if (bms_fake_battery != -EINVAL) {
-		pr_debug("Returning Fake SOC = %d%%\n", bms_fake_battery);
-		return bms_fake_battery;
-	}
-
 	if (soc < 0) {
 		pr_err("bad rem_usb_chg = %d rem_chg %d,"
 				"cc_uah %d, unusb_chg %d\n",
@@ -2253,6 +2248,11 @@
 	int batt_temp;
 	int rc;
 
+	if (bms_fake_battery != -EINVAL) {
+		pr_debug("Returning Fake SOC = %d%%\n", bms_fake_battery);
+		return bms_fake_battery;
+	}
+
 	rc = pm8xxx_adc_read(the_chip->batt_temp_channel, &result);
 	if (rc) {
 		pr_err("error reading adc channel = %d, rc = %d\n",
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
index c91cd68..591440d 100644
--- a/drivers/usb/misc/ks_bridge.c
+++ b/drivers/usb/misc/ks_bridge.c
@@ -69,7 +69,6 @@
 	struct usb_anchor	submitted;
 
 	unsigned long		flags;
-	unsigned int		alloced_read_pkts;
 
 #define DBG_MSG_LEN   40
 #define DBG_MAX_MSG   500
@@ -136,6 +135,8 @@
 }
 
 
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt);
+
 static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
 				size_t count, loff_t *pos)
 {
@@ -174,7 +175,6 @@
 		if (ret) {
 			pr_err("copy_to_user failed err:%d\n", ret);
 			ksb_free_data_pkt(pkt);
-			ksb->alloced_read_pkts--;
 			return ret;
 		}
 
@@ -184,9 +184,16 @@
 
 		spin_lock_irqsave(&ksb->lock, flags);
 		if (pkt->n_read == pkt->len) {
+			/*
+			 * re-init the packet and queue it
+			 * for more data.
+			 */
 			list_del_init(&pkt->list);
-			ksb_free_data_pkt(pkt);
-			ksb->alloced_read_pkts--;
+			pkt->n_read = 0;
+			pkt->len = MAX_DATA_PKT_SIZE;
+			spin_unlock_irqrestore(&ksb->lock, flags);
+			submit_one_urb(ksb, GFP_KERNEL, pkt);
+			spin_lock_irqsave(&ksb->lock, flags);
 		}
 	}
 	spin_unlock_irqrestore(&ksb->lock, flags);
@@ -394,25 +401,18 @@
 MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
 
 static void ksb_rx_cb(struct urb *urb);
-static void submit_one_urb(struct ks_bridge *ksb)
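+/*
+ * Wrap the given packet in a bulk IN urb and submit it; the packet is
+ * freed if allocation or submission fails.
+ */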
+static void
+submit_one_urb(struct ks_bridge *ksb, gfp_t flags, struct data_pkt *pkt)
 {
-	struct data_pkt	*pkt;
 	struct urb *urb;
 	int ret;
 
-	pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
-	if (IS_ERR(pkt)) {
-		pr_err("unable to allocate data pkt");
-		return;
-	}
-
-	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	urb = usb_alloc_urb(0, flags);
 	if (!urb) {
 		pr_err("unable to allocate urb");
 		ksb_free_data_pkt(pkt);
 		return;
 	}
-	ksb->alloced_read_pkts++;
 
 	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
 			pkt->buf, pkt->len,
@@ -421,13 +421,12 @@
 
 	dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
 
-	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	ret = usb_submit_urb(urb, flags);
 	if (ret) {
 		pr_err("in urb submission failed");
 		usb_unanchor_urb(urb);
 		usb_free_urb(urb);
 		ksb_free_data_pkt(pkt);
-		ksb->alloced_read_pkts--;
 		return;
 	}
 
@@ -452,14 +451,12 @@
 			pr_err_ratelimited("urb failed with err:%d",
 					urb->status);
 		ksb_free_data_pkt(pkt);
-		ksb->alloced_read_pkts--;
 		return;
 	}
 
 	if (urb->actual_length == 0) {
-		ksb_free_data_pkt(pkt);
-		ksb->alloced_read_pkts--;
-		goto resubmit_urb;
+		submit_one_urb(ksb, GFP_ATOMIC, pkt);
+		return;
 	}
 
 add_to_list:
@@ -470,10 +467,6 @@
 
 	/* wake up read thread */
 	wake_up(&ksb->ks_wait_q);
-
-resubmit_urb:
-	submit_one_urb(ksb);
-
 }
 
 static void ksb_start_rx_work(struct work_struct *w)
@@ -506,7 +499,6 @@
 			ksb_free_data_pkt(pkt);
 			return;
 		}
-		ksb->alloced_read_pkts++;
 
 		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
 				pkt->buf, pkt->len,
@@ -521,7 +513,6 @@
 			usb_unanchor_urb(urb);
 			usb_free_urb(urb);
 			ksb_free_data_pkt(pkt);
-			ksb->alloced_read_pkts--;
 			usb_autopm_put_interface(ksb->ifc);
 			return;
 		}
@@ -610,8 +601,6 @@
 
 	dbg_log_event(ksb, "SUSPEND", 0, 0);
 
-	pr_debug("read cnt: %d", ksb->alloced_read_pkts);
-
 	usb_kill_anchored_urbs(&ksb->submitted);
 
 	return 0;
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 6b75781..e2e2df6 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2228,6 +2228,10 @@
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	mdp_histogram_ctrl_all(TRUE);
+
+	if (ret == 0)
+		ret = panel_next_late_init(pdev);
+
 	pr_debug("%s:-\n", __func__);
 
 	return ret;
@@ -2534,6 +2538,7 @@
 	pdata = msm_fb_dev->dev.platform_data;
 	pdata->on = mdp_on;
 	pdata->off = mdp_off;
+	pdata->late_init = NULL;
 	pdata->next = pdev;
 
 	mdp_clk_ctrl(1);
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
index b53e823..493b102 100644
--- a/drivers/video/msm/mipi_dsi.c
+++ b/drivers/video/msm/mipi_dsi.c
@@ -330,6 +330,11 @@
 	return ret;
 }
 
+static int mipi_dsi_late_init(struct platform_device *pdev)
+{
+	return panel_next_late_init(pdev);
+}
 
 static int mipi_dsi_resource_initialized;
 
@@ -477,6 +482,7 @@
 	pdata = mdp_dev->dev.platform_data;
 	pdata->on = mipi_dsi_on;
 	pdata->off = mipi_dsi_off;
+	pdata->late_init = mipi_dsi_late_init;
 	pdata->next = pdev;
 
 	/*
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 2215240..9a38198 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -449,6 +449,11 @@
 	return 0;
 }
 
+static int mipi_novatek_lcd_late_init(struct platform_device *pdev)
+{
+	return 0;
+}
+
 DEFINE_LED_TRIGGER(bkl_led_trigger);
 
 static char led_pwm1[2] = {0x51, 0x0};	/* DTYPE_DCS_WRITE1 */
@@ -547,6 +552,7 @@
 static struct msm_fb_panel_data novatek_panel_data = {
 	.on		= mipi_novatek_lcd_on,
 	.off		= mipi_novatek_lcd_off,
+	.late_init	= mipi_novatek_lcd_late_init,
 	.set_backlight = mipi_novatek_set_backlight,
 };
 
diff --git a/drivers/video/msm/mipi_toshiba.c b/drivers/video/msm/mipi_toshiba.c
index 520c67b..498e122 100644
--- a/drivers/video/msm/mipi_toshiba.c
+++ b/drivers/video/msm/mipi_toshiba.c
@@ -224,6 +224,11 @@
 	return 0;
 }
 
+static int mipi_toshiba_lcd_late_init(struct platform_device *pdev)
+{
+	return 0;
+}
+
 void mipi_bklight_pwm_cfg(void)
 {
 	if (mipi_toshiba_pdata && mipi_toshiba_pdata->dsi_pwm_cfg)
@@ -296,6 +301,7 @@
 static struct msm_fb_panel_data toshiba_panel_data = {
 	.on		= mipi_toshiba_lcd_on,
 	.off		= mipi_toshiba_lcd_off,
+	.late_init	= mipi_toshiba_lcd_late_init,
 	.set_backlight  = mipi_toshiba_set_backlight,
 };
 
diff --git a/drivers/video/msm/msm_fb_panel.c b/drivers/video/msm/msm_fb_panel.c
index 8640116..c604f02 100644
--- a/drivers/video/msm/msm_fb_panel.c
+++ b/drivers/video/msm/msm_fb_panel.c
@@ -78,6 +78,28 @@
 	return ret;
 }
 
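+/* Forward the late_init call to the next device in the panel chain. */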
+int panel_next_late_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct msm_fb_panel_data *pdata;
+	struct msm_fb_panel_data *next_pdata;
+	struct platform_device *next_pdev;
+
+	pdata = (struct msm_fb_panel_data *)pdev->dev.platform_data;
+
+	if (pdata) {
+		next_pdev = pdata->next;
+		if (next_pdev) {
+			next_pdata = (struct msm_fb_panel_data *)
+					next_pdev->dev.platform_data;
+			if (next_pdata && next_pdata->late_init)
+				ret = next_pdata->late_init(next_pdev);
+		}
+	}
+
+	return ret;
+}
+
 struct platform_device *msm_fb_device_alloc(struct msm_fb_panel_data *pdata,
 						u32 type, u32 id)
 {
diff --git a/drivers/video/msm/msm_fb_panel.h b/drivers/video/msm/msm_fb_panel.h
index 9e1295d..ef58391 100644
--- a/drivers/video/msm/msm_fb_panel.h
+++ b/drivers/video/msm/msm_fb_panel.h
@@ -195,6 +195,7 @@
 	/* function entry chain */
 	int (*on) (struct platform_device *pdev);
 	int (*off) (struct platform_device *pdev);
+	int (*late_init) (struct platform_device *pdev);
 	int (*power_ctrl) (boolean enable);
 	struct platform_device *next;
 	int (*clk_func) (int enable);
@@ -207,6 +208,7 @@
 						u32 type, u32 id);
 int panel_next_on(struct platform_device *pdev);
 int panel_next_off(struct platform_device *pdev);
+int panel_next_late_init(struct platform_device *pdev);
 
 int lcdc_device_register(struct msm_panel_info *pinfo);
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e0f58b..6b83222 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -664,7 +664,7 @@
 		}
 	}
 
-#ifdef CONFIG_DMA_CMA
+#ifdef CONFIG_CMA
 	if (is_cma_pageblock(page)) {
 		struct page *oldpage = page, *newpage;
 		int err;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7bdd3f2..b7a10fc 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -144,6 +144,7 @@
 	NUMA_OTHER,		/* allocation from other node */
 #endif
 	NR_ANON_TRANSPARENT_HUGEPAGES,
+	NR_FREE_CMA_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 65efb92..1d10474 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -258,6 +258,13 @@
 
 #endif		/* CONFIG_SMP */
 
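+/*
+ * Adjust NR_FREE_PAGES and, when the pages belong to a CMA pageblock,
+ * keep NR_FREE_CMA_PAGES in sync as well.
+ */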
+static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
+					     int migratetype)
+{
+	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+	if (is_migrate_cma(migratetype))
+		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+}
+
 extern const char * const vmstat_text[];
 
 #endif /* _LINUX_VMSTAT_H */
diff --git a/include/media/msm_gemini.h b/include/media/msm_gemini.h
index 0167335..2209758 100644
--- a/include/media/msm_gemini.h
+++ b/include/media/msm_gemini.h
@@ -51,10 +51,19 @@
 #define MSM_GMN_IOCTL_TEST_DUMP_REGION \
 	_IOW(MSM_GMN_IOCTL_MAGIC, 15, unsigned long)
 
+#define MSM_GMN_IOCTL_SET_MODE \
+	_IOW(MSM_GMN_IOCTL_MAGIC, 16, enum msm_gmn_out_mode)
+
 #define MSM_GEMINI_MODE_REALTIME_ENCODE 0
 #define MSM_GEMINI_MODE_OFFLINE_ENCODE 1
 #define MSM_GEMINI_MODE_REALTIME_ROTATION 2
 #define MSM_GEMINI_MODE_OFFLINE_ROTATION 3
+
+enum msm_gmn_out_mode {
+	MSM_GMN_OUTMODE_FRAGMENTED,
+	MSM_GMN_OUTMODE_SINGLE
+};
+
 struct msm_gemini_ctrl_cmd {
 	uint32_t type;
 	uint32_t len;
diff --git a/mm/memory.c b/mm/memory.c
index b2baf36..a240b04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2900,7 +2900,7 @@
 	entry = pte_to_swp_entry(orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
 		if (is_migration_entry(entry)) {
-#ifdef CONFIG_DMA_CMA
+#ifdef CONFIG_CMA
 			/*
 			 * FIXME: mszyprow: cruel, brute-force method for
 			 * letting cma/migration to finish it's job without
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd3f0f3..30fd6b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -561,7 +561,8 @@
 		if (page_is_guard(buddy)) {
 			clear_page_guard_flag(buddy);
 			set_page_private(page, 0);
-			__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+			__mod_zone_freepage_state(zone, 1 << order,
+						  migratetype);
 		} else {
 			list_del(&buddy->lru);
 			zone->free_area[order].nr_free--;
@@ -643,6 +644,7 @@
 	int migratetype = 0;
 	int batch_free = 0;
 	int to_free = count;
+	int mt = 0;
 
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
@@ -672,11 +674,15 @@
 
 		do {
 			page = list_entry(list->prev, struct page, lru);
+			mt = get_pageblock_migratetype(page);
 			/* must delete as __free_one_page list manipulates */
 			list_del(&page->lru);
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, zone, 0, page_private(page));
 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
+			if (is_migrate_cma(mt))
+				__mod_zone_page_state(zone,
+				NR_FREE_CMA_PAGES, 1);
 		} while (--to_free && --batch_free && !list_empty(list));
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -691,7 +697,8 @@
 	zone->pages_scanned = 0;
 
 	__free_one_page(page, zone, order, migratetype);
-	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+	if (unlikely(migratetype != MIGRATE_ISOLATE))
+		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 	spin_unlock(&zone->lock);
 }
 
@@ -819,7 +826,8 @@
 			set_page_guard_flag(&page[size]);
 			set_page_private(&page[size], high);
 			/* Guard pages are not available for any usage */
-			__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
+			__mod_zone_freepage_state(zone, -(1 << high),
+						  migratetype);
 			continue;
 		}
 #endif
@@ -1145,6 +1153,9 @@
 		}
 		set_page_private(page, mt);
 		list = &page->lru;
+		if (is_migrate_cma(mt))
+			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
+					      -(1 << order));
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
@@ -1418,7 +1429,9 @@
 	list_del(&page->lru);
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
-	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+	if (unlikely(mt != MIGRATE_ISOLATE))
+		__mod_zone_freepage_state(zone, -(1UL << order), mt);
 
 	/* Split into individual pages */
 	set_page_refcounted(page);
@@ -1493,7 +1506,8 @@
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
-		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
+		__mod_zone_freepage_state(zone, -(1 << order),
+					  get_pageblock_migratetype(page));
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
@@ -2806,7 +2820,8 @@
 		" unevictable:%lu"
 		" dirty:%lu writeback:%lu unstable:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
-		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
+		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+		" free_cma:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_ISOLATED_ANON),
@@ -2823,7 +2838,8 @@
 		global_page_state(NR_FILE_MAPPED),
 		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
-		global_page_state(NR_BOUNCE));
+		global_page_state(NR_BOUNCE),
+		global_page_state(NR_FREE_CMA_PAGES));
 
 	for_each_populated_zone(zone) {
 		int i;
@@ -2855,6 +2871,7 @@
 			" pagetables:%lukB"
 			" unstable:%lukB"
 			" bounce:%lukB"
+			" free_cma:%lukB"
 			" writeback_tmp:%lukB"
 			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
@@ -2884,6 +2901,7 @@
 			K(zone_page_state(zone, NR_PAGETABLE)),
 			K(zone_page_state(zone, NR_UNSTABLE_NFS)),
 			K(zone_page_state(zone, NR_BOUNCE)),
+			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			zone->pages_scanned,
 			(zone->all_unreclaimable ? "yes" : "no")
@@ -5615,8 +5633,13 @@
 
 out:
 	if (!ret) {
+		unsigned long nr_pages;
+		int migratetype = get_pageblock_migratetype(page);
+
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
-		move_freepages_block(zone, page, MIGRATE_ISOLATE);
+		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+
+		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
 	}
 
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -5628,13 +5651,15 @@
 void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 {
 	struct zone *zone;
-	unsigned long flags;
+	unsigned long flags, nr_pages;
+
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lock, flags);
 	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 		goto out;
+	nr_pages = move_freepages_block(zone, page, migratetype);
+	__mod_zone_freepage_state(zone, nr_pages, migratetype);
 	set_pageblock_migratetype(page, migratetype);
-	move_freepages_block(zone, page, migratetype);
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0dad31dc..8e18d6b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -722,6 +722,7 @@
 	"numa_other",
 #endif
 	"nr_anon_transparent_hugepages",
+	"nr_free_cma",
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
 
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index f5016fa..7bccabb 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -900,20 +900,20 @@
 	btn_low = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_V_BTN_LOW);
 	btn_high = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_V_BTN_HIGH);
 	btn_low[0] = -50;
-	btn_high[0] = 20;
-	btn_low[1] = 21;
-	btn_high[1] = 62;
-	btn_low[2] = 62;
-	btn_high[2] = 104;
-	btn_low[3] = 105;
-	btn_high[3] = 143;
-	btn_low[4] = 144;
-	btn_high[4] = 181;
-	btn_low[5] = 182;
-	btn_high[5] = 218;
-	btn_low[6] = 219;
-	btn_high[6] = 254;
-	btn_low[7] = 255;
+	btn_high[0] = 10;
+	btn_low[1] = 11;
+	btn_high[1] = 52;
+	btn_low[2] = 53;
+	btn_high[2] = 94;
+	btn_low[3] = 95;
+	btn_high[3] = 133;
+	btn_low[4] = 134;
+	btn_high[4] = 171;
+	btn_low[5] = 172;
+	btn_high[5] = 208;
+	btn_low[6] = 209;
+	btn_high[6] = 244;
+	btn_low[7] = 245;
 	btn_high[7] = 330;
 	n_ready = tabla_mbhc_cal_btn_det_mp(btn_cfg, TABLA_BTN_DET_N_READY);
 	n_ready[0] = 80;