Assorted work-in-progress changes: initial DCVS support, some hacks in f_projector, a hack delaying writeback_panel init so that device one is initialized first, some previously missing files, and some board/devices changes.
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index a40e5ab..161a2e0 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -2212,6 +2212,14 @@
 	depends on PM
 	bool
 
+config MSM_EVENT_TIMER
+	bool "Event timer"
+	help
+		This option enables a module that manages a list of event timers
+		that need to be monitored by the PM. This enables the PM code to
+		monitor events that require the core to be awake and ready to
+		handle the event.
+
 config MSM_NOPM
 	default y if !PM
 	bool
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 3cc19ee..8e5363b 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -405,6 +405,7 @@
 obj-$(CONFIG_MSM_BUSPM_DEV) += msm-buspm-dev.o
 
 obj-$(CONFIG_MSM_IOMMU)		+= devices-iommu.o iommu_domains.o
+obj-$(CONFIG_MSM_EVENT_TIMER)		+= event_timer.o
 
 ifdef CONFIG_VCM
 obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
@@ -425,7 +426,7 @@
 
 obj-$(CONFIG_MSM_SLEEP_STATS) += idle_stats.o
 obj-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += idle_stats_device.o
-obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_dcvs_idle.o
+obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_mpdecision.o
 obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
 obj-$(CONFIG_MSM_SHOW_RESUME_IRQ) += msm_show_resume_irq.o
 obj-$(CONFIG_BT_MSM_PINTEST)  += btpintest.o
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index 2592eee..b72e257 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -33,6 +33,7 @@
 #include <mach/rpm-regulator.h>
 #include <mach/rpm-regulator-smd.h>
 #include <mach/msm_bus.h>
+#include <mach/msm_dcvs.h>
 
 #include "acpuclock.h"
 #include "acpuclock-krait.h"
@@ -912,6 +913,17 @@
 static void __init cpufreq_table_init(void) {}
 #endif
 
+static void __init dcvs_freq_init(void)
+{
+	int i;
+
+	for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0; i++)
+		if (drv.acpu_freq_tbl[i].use_for_scaling)
+			msm_dcvs_register_cpu_freq(
+				drv.acpu_freq_tbl[i].speed.khz,
+				drv.acpu_freq_tbl[i].vdd_core / 1000);
+}
+
 static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
 					    unsigned long action, void *hcpu)
 {
@@ -1125,6 +1137,7 @@
 	hw_init();
 
 	cpufreq_table_init();
+	dcvs_freq_init();
 	acpuclk_register(&acpuclk_krait_data);
 	register_hotcpu_notifier(&acpuclk_cpu_notifier);
 
diff --git a/arch/arm/mach-msm/board-m7-display.c b/arch/arm/mach-msm/board-m7-display.c
index 217fbd6..cf6b44a 100644
--- a/arch/arm/mach-msm/board-m7-display.c
+++ b/arch/arm/mach-msm/board-m7-display.c
@@ -56,6 +56,9 @@
 #define MSM_FB_OVERLAY1_WRITEBACK_SIZE (0)
 #endif  
 
+static void m7_display_on(struct msm_fb_data_type *mfd);
+static void m7_display_off(struct msm_fb_data_type *mfd);
+
 static struct resource msm_fb_resources[] = {
 	{
 		.flags = IORESOURCE_DMA,
@@ -521,7 +524,7 @@
 	if (mdp_gamma == NULL)
 		return 0;
 
-	mdp_color_enhancement(mdp_gamma, mdp_gamma_count);
+        //	mdp_color_enhancement(mdp_gamma, mdp_gamma_count);
 	return 0;
 }
 
@@ -743,9 +746,9 @@
 static int display_off_cmds_count = 0;
 static struct dsi_cmd_desc *cmd_on_cmds = NULL;
 static int cmd_on_cmds_count = 0;
-static atomic_t lcd_backlight_off;
+//static atomic_t lcd_backlight_off;
 
-#define CABC_DIMMING_SWITCH
+//#define CABC_DIMMING_SWITCH
 
 static char enter_sleep[2] = {0x10, 0x00}; 
 static char exit_sleep[2] = {0x11, 0x00}; 
@@ -1368,6 +1371,8 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+        m7_display_on(mfd);
+
 	mipi  = &mfd->panel_info.mipi;
 	if (!first_init) {
 		if (mipi->mode == DSI_VIDEO_MODE) {
@@ -1402,6 +1407,8 @@
 
 	resume_blk = 1;
 
+        m7_display_off(mfd);
+
 	PR_DISP_INFO("%s\n", __func__);
 	return 0;
 }
@@ -1566,9 +1573,9 @@
 {
 	int rc;
 
-	if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND) {
-		return;
-	}
+        //	if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND) {
+        //		return;
+        //	}
 
 	if ((panel_type == PANEL_ID_M7_JDI_SAMSUNG) ||
 		(panel_type == PANEL_ID_M7_JDI_SAMSUNG_C2) ||
@@ -1671,6 +1678,7 @@
 
 	return;
 }
+
 static void m7_color_enhance(struct msm_fb_data_type *mfd, int on)
 {
 	if ((panel_type == PANEL_ID_M7_JDI_SAMSUNG) ||
@@ -1726,6 +1734,7 @@
 		PR_DISP_INFO("switch color enhance\n");
 	}
 }
+
 static void m7_sre_ctrl(struct msm_fb_data_type *mfd, unsigned long level)
 {
 	static long prev_level = 0, current_stage = 0, prev_stage = 0, tmp_stage = 0;
@@ -1992,16 +2001,14 @@
 	.on	= m7_lcd_on,
 	.off	= m7_lcd_off,
 	.set_backlight = m7_set_backlight,
-	.display_on = m7_display_on,
-	.display_off = m7_display_off,
-	.color_enhance = m7_color_enhance,
+        //	.color_enhance = m7_color_enhance,
 #ifdef CABC_DIMMING_SWITCH
-	.dimming_on = m7_dim_on,
+        //	.dimming_on = m7_dim_on,
 #endif
 #ifdef CONFIG_FB_MSM_CABC_LEVEL_CONTROL
 	.set_cabc = m7_set_cabc,
 #endif
-	.sre_ctrl = m7_sre_ctrl,
+        //	.sre_ctrl = m7_sre_ctrl,
 };
 
 static struct msm_panel_info pinfo;
@@ -2084,9 +2091,9 @@
 	pinfo.pdest = DISPLAY_1;
 	pinfo.wait_cycle = 0;
 	pinfo.bpp = 24;
-	pinfo.width = 58;
-	pinfo.height = 103;
-	pinfo.camera_backlight = 183;
+        //	pinfo.width = 58;
+        //	pinfo.height = 103;
+        //	pinfo.camera_backlight = 183;
 
 	pinfo.lcdc.h_back_porch = 27;
 	pinfo.lcdc.h_front_porch = 38;
@@ -2163,9 +2170,9 @@
 	pinfo.pdest = DISPLAY_1;
 	pinfo.wait_cycle = 0;
 	pinfo.bpp = 24;
-        pinfo.width = 58;
-        pinfo.height = 103;
-	pinfo.camera_backlight = 183;
+        //        pinfo.width = 58;
+        //        pinfo.height = 103;
+        //	pinfo.camera_backlight = 183;
 
 	pinfo.lcdc.h_back_porch = 27;
 	pinfo.lcdc.h_front_porch = 38;
@@ -2244,9 +2251,9 @@
 	pinfo.pdest = DISPLAY_1;
 	pinfo.wait_cycle = 0;
 	pinfo.bpp = 24;
-	pinfo.width = 61;
-	pinfo.height = 110;
-	pinfo.camera_backlight = 176;
+        //	pinfo.width = 61;
+        //	pinfo.height = 110;
+        //	pinfo.camera_backlight = 176;
 
 	pinfo.lcdc.h_back_porch = 58;
 	pinfo.lcdc.h_front_porch = 100;
@@ -2329,9 +2336,9 @@
 	pinfo.pdest = DISPLAY_1;
 	pinfo.wait_cycle = 0;
 	pinfo.bpp = 24;
-	pinfo.width = 61;
-	pinfo.height = 110;
-	pinfo.camera_backlight = 176;
+        //	pinfo.width = 61;
+        //	pinfo.height = 110;
+        //	pinfo.camera_backlight = 176;
 
 	pinfo.lcdc.h_back_porch = 58;
 	pinfo.lcdc.h_front_porch = 100;
@@ -2405,9 +2412,9 @@
 	pinfo.pdest = DISPLAY_1;
 	pinfo.wait_cycle = 0;
 	pinfo.bpp = 24;
-        pinfo.width = 58;
-        pinfo.height = 103;
-	pinfo.camera_backlight = 183;
+        //        pinfo.width = 58;
+        //        pinfo.height = 103;
+        //	pinfo.camera_backlight = 183;
 
 	pinfo.lcdc.h_back_porch = 27;
 	pinfo.lcdc.h_front_porch = 38;
@@ -2457,7 +2464,7 @@
 
 	pinfo.mipi.frame_rate = 60;
 	
-	pinfo.lcdc.no_set_tear = 1;
+        //	pinfo.lcdc.no_set_tear = 1;
 
 	pinfo.mipi.dsi_phy_db = &dsi_jdi_cmd_mode_phy_db;
 
diff --git a/arch/arm/mach-msm/board-m7-gpu.c b/arch/arm/mach-msm/board-m7-gpu.c
index 3e70176..c15254c 100644
--- a/arch/arm/mach-msm/board-m7-gpu.c
+++ b/arch/arm/mach-msm/board-m7-gpu.c
@@ -13,7 +13,7 @@
 
 #include <linux/init.h>
 #include <linux/platform_device.h>
-#include <linux/msm_kgsl.h>
+#include <mach/kgsl.h>
 #include <mach/msm_bus_board.h>
 #include <mach/board.h>
 #include <mach/msm_dcvs.h>
@@ -23,26 +23,51 @@
 
 #ifdef CONFIG_MSM_DCVS
 static struct msm_dcvs_freq_entry grp3d_freq[] = {
-       {0, 0, 333932},
-       {0, 0, 497532},
-       {0, 0, 707610},
-       {0, 0, 844545},
+	{0, 900, 0, 0, 0},
+	{0, 950, 0, 0, 0},
+	{0, 950, 0, 0, 0},
+	{0, 1200, 1, 100, 100},
 };
 
 static struct msm_dcvs_core_info grp3d_core_info = {
-       .freq_tbl = &grp3d_freq[0],
-       .core_param = {
-               .max_time_us = 100000,
-               .num_freq = ARRAY_SIZE(grp3d_freq),
-       },
-       .algo_param = {
-               .slack_time_us = 39000,
-               .disable_pc_threshold = 86000,
-               .ss_window_size = 1000000,
-               .ss_util_pct = 95,
-               .em_max_util_pct = 97,
-               .ss_iobusy_conv = 100,
-       },
+	.freq_tbl		= &grp3d_freq[0],
+	.num_cores		= 1,
+	.sensors		= (int[]){0},
+	.thermal_poll_ms	= 60000,
+	.core_param		= {
+		.core_type	= MSM_DCVS_CORE_TYPE_GPU,
+	},
+	.algo_param		= {
+		.disable_pc_threshold	= 0,
+		.em_win_size_min_us	= 100000,
+		.em_win_size_max_us	= 300000,
+		.em_max_util_pct	= 97,
+		.group_id		= 0,
+		.max_freq_chg_time_us	= 100000,
+		.slack_mode_dynamic	= 0,
+		.slack_time_min_us	= 39000,
+		.slack_time_max_us	= 39000,
+		.ss_win_size_min_us	= 1000000,
+		.ss_win_size_max_us	= 1000000,
+		.ss_util_pct		= 95,
+		.ss_no_corr_below_freq	= 0,
+	},
+
+	.energy_coeffs		= {
+		.leakage_coeff_a	= -17720,
+		.leakage_coeff_b	= 37,
+		.leakage_coeff_c	= 3329,
+		.leakage_coeff_d	= -277,
+
+		.active_coeff_a		= 2492,
+		.active_coeff_b		= 0,
+		.active_coeff_c		= 0
+	},
+
+	.power_param		= {
+		.current_temp	= 25,
+		.num_freq	= ARRAY_SIZE(grp3d_freq),
+	}
 };
 #endif 
 
diff --git a/arch/arm/mach-msm/board-m7.c b/arch/arm/mach-msm/board-m7.c
index f1def7f..cfde6a2 100644
--- a/arch/arm/mach-msm/board-m7.c
+++ b/arch/arm/mach-msm/board-m7.c
@@ -27,7 +27,7 @@
 #include <linux/spi/spi.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_data/qcom_crypto_device.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
 #include <linux/msm_thermal.h>
@@ -465,9 +465,7 @@
 };
 #endif
 
-static struct ion_platform_data ion_pdata = {
-	.nr = MSM_ION_HEAP_NUM,
-	.heaps = {
+struct ion_platform_heap msm8960_heaps[] = {
 		{
 			.id	= ION_SYSTEM_HEAP_ID,
 			.type	= ION_HEAP_TYPE_SYSTEM,
@@ -528,7 +526,11 @@
 			.extra_data = (void *) &co_m7_ion_pdata,
 		},
 #endif
-	}
+};
+
+static struct ion_platform_data ion_pdata = {
+	.nr = MSM_ION_HEAP_NUM,
+	.heaps = msm8960_heaps,
 };
 
 static struct platform_device m7_ion_dev = {
@@ -589,7 +591,7 @@
 		const struct ion_platform_heap *heap =
 			&(ion_pdata.heaps[i]);
 
-		if (heap->type == ION_HEAP_TYPE_CP && heap->extra_data) {
+		if ((int)heap->type == (int)ION_HEAP_TYPE_CP && heap->extra_data) {
 			struct ion_cp_heap_pdata *data = heap->extra_data;
 
 			reusable_count += (data->reusable) ? 1 : 0;
@@ -611,14 +613,14 @@
 			int fixed_position = NOT_FIXED;
 			int mem_is_fmem = 0;
 
-			switch (heap->type) {
-			case ION_HEAP_TYPE_CP:
+			switch ((int)heap->type) {
+			case (int)ION_HEAP_TYPE_CP:
 				mem_is_fmem = ((struct ion_cp_heap_pdata *)
 					heap->extra_data)->mem_is_fmem;
 				fixed_position = ((struct ion_cp_heap_pdata *)
 					heap->extra_data)->fixed_position;
 				break;
-			case ION_HEAP_TYPE_CARVEOUT:
+			case (int)ION_HEAP_TYPE_CARVEOUT:
 				mem_is_fmem = ((struct ion_co_heap_pdata *)
 					heap->extra_data)->mem_is_fmem;
 				fixed_position = ((struct ion_co_heap_pdata *)
@@ -669,13 +671,13 @@
 			int fixed_position = NOT_FIXED;
 			struct ion_cp_heap_pdata *pdata = NULL;
 
-			switch (heap->type) {
-			case ION_HEAP_TYPE_CP:
+			switch ((int)heap->type) {
+			case (int)ION_HEAP_TYPE_CP:
 				pdata =
 				(struct ion_cp_heap_pdata *)heap->extra_data;
 				fixed_position = pdata->fixed_position;
 				break;
-			case ION_HEAP_TYPE_CARVEOUT:
+			case (int)ION_HEAP_TYPE_CARVEOUT:
 				fixed_position = ((struct ion_co_heap_pdata *)
 					heap->extra_data)->fixed_position;
 				break;
@@ -1192,6 +1194,16 @@
 };
 #endif 
 
+#define _GET_REGULATOR(var, name) do {				\
+	var = regulator_get(NULL, name);			\
+	if (IS_ERR(var)) {					\
+		pr_err("'%s' regulator not found, rc=%ld\n",	\
+			name, IS_ERR(var));			\
+		var = NULL;					\
+		return -ENODEV;					\
+	}							\
+} while (0)
+
 #ifdef CONFIG_FB_MSM_HDMI_MHL
 static struct pm8xxx_gpio_init switch_to_usb_pmic_gpio_table[] = {
         PM8XXX_GPIO_INIT(USBz_AUDIO_SW, PM_GPIO_DIR_OUT,
@@ -1239,16 +1251,6 @@
 static DEFINE_MUTEX(mhl_lpm_lock);
 
 
-#define _GET_REGULATOR(var, name) do {				\
-	var = regulator_get(NULL, name);			\
-	if (IS_ERR(var)) {					\
-		pr_err("'%s' regulator not found, rc=%ld\n",	\
-			name, IS_ERR(var));			\
-		var = NULL;					\
-		return -ENODEV;					\
-	}							\
-} while (0)
-
 uint32_t msm_hdmi_off_gpio[] = {
         GPIO_CFG(HDMI_DDC_CLK,  0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
         GPIO_CFG(HDMI_DDC_DATA,  0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
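
The _GET_REGULATOR() macro is moved above the CONFIG_FB_MSM_HDMI_MHL block so code outside the MHL section can use it as well. Because it returns -ENODEV on failure, it can only be invoked from a function returning int. A minimal usage sketch; the regulator name "8921_l12" and the reg_l12 variable are illustrative, not taken from this patch:

	static struct regulator *reg_l12;

	static int hdmi_request_regulators(void)
	{
		if (!reg_l12)
			_GET_REGULATOR(reg_l12, "8921_l12"); /* illustrative name */
		return 0;
	}
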
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index e8df351..5583ce6 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -33,7 +33,7 @@
 #include <mach/msm_smd.h>
 #include <mach/msm_dcvs.h>
 #include <mach/msm_rtb.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 #include "clock.h"
 #include "devices.h"
 #include "footswitch.h"
@@ -2432,33 +2432,43 @@
 	},
 };
 
-static struct msm_dcvs_freq_entry apq8064_freq[] = {
-	{ 384000, 166981,  345600},
-	{ 702000, 213049,  632502},
-	{1026000, 285712,  925613},
-	{1242000, 383945, 1176550},
-	{1458000, 419729, 1465478},
-	{1512000, 434116, 1546674},
-
-};
-
 static struct msm_dcvs_core_info apq8064_core_info = {
-	.freq_tbl = &apq8064_freq[0],
-	.core_param = {
-		.max_time_us = 100000,
-		.num_freq = ARRAY_SIZE(apq8064_freq),
+	.num_cores		= 4,
+	.sensors		= (int[]){7, 8, 9, 10},
+	.thermal_poll_ms	= 60000,
+	.core_param		= {
+		.core_type	= MSM_DCVS_CORE_TYPE_CPU,
 	},
-	.algo_param = {
-		.slack_time_us = 58000,
-		.scale_slack_time = 0,
-		.scale_slack_time_pct = 0,
-		.disable_pc_threshold = 1458000,
-		.em_window_size = 100000,
-		.em_max_util_pct = 97,
-		.ss_window_size = 1000000,
-		.ss_util_pct = 95,
-		.ss_iobusy_conv = 100,
+	.algo_param		= {
+		.disable_pc_threshold		= 1458000,
+		.em_win_size_min_us		= 100000,
+		.em_win_size_max_us		= 300000,
+		.em_max_util_pct		= 97,
+		.group_id			= 1,
+		.max_freq_chg_time_us		= 100000,
+		.slack_mode_dynamic		= 0,
+		.slack_weight_thresh_pct	= 3,
+		.slack_time_min_us		= 45000,
+		.slack_time_max_us		= 45000,
+		.ss_no_corr_below_freq		= 0,
+		.ss_win_size_min_us		= 1000000,
+		.ss_win_size_max_us		= 1000000,
+		.ss_util_pct			= 95,
 	},
+	.energy_coeffs		= {
+		.active_coeff_a		= 336,
+		.active_coeff_b		= 0,
+		.active_coeff_c		= 0,
+
+		.leakage_coeff_a	= -17720,
+		.leakage_coeff_b	= 37,
+		.leakage_coeff_c	= 3329,
+		.leakage_coeff_d	= -277,
+	},
+	.power_param		= {
+		.current_temp	= 25,
+		.num_freq	= 0, /* set at runtime */
+	}
 };
 
 struct platform_device apq8064_msm_gov_device = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 2b3235c..420e7ce 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -15,11 +15,11 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/msm_rotator.h>
-#include <linux/ion.h>
+#include <linux/msm_ion.h>
 #include <linux/gpio.h>
 #include <linux/coresight.h>
 #include <asm/clkdev.h>
-#include <linux/msm_kgsl.h>
+#include <mach/kgsl.h>
 #include <linux/android_pmem.h>
 #include <mach/irqs-8960.h>
 #include <mach/dma.h>
@@ -2889,46 +2889,89 @@
 };
 
 static struct msm_dcvs_freq_entry grp3d_freq[] = {
-	{0, 0, 333932},
-	{0, 0, 497532},
-	{0, 0, 707610},
-	{0, 0, 844545},
+	{0, 900, 0, 0, 0},
+	{0, 950, 0, 0, 0},
+	{0, 950, 0, 0, 0},
+	{0, 1200, 1, 100, 100},
 };
 
 static struct msm_dcvs_freq_entry grp2d_freq[] = {
-	{0, 0, 86000},
-	{0, 0, 200000},
+	{0, 900, 0, 0, 0},
+	{0, 950, 1, 100, 100},
 };
 
 static struct msm_dcvs_core_info grp3d_core_info = {
-	.freq_tbl = &grp3d_freq[0],
-	.core_param = {
-		.max_time_us = 100000,
-		.num_freq = ARRAY_SIZE(grp3d_freq),
+	.freq_tbl	= &grp3d_freq[0],
+	.core_param	= {
+		.core_type	= MSM_DCVS_CORE_TYPE_GPU,
 	},
-	.algo_param = {
-		.slack_time_us = 39000,
-		.disable_pc_threshold = 86000,
-		.ss_window_size = 1000000,
-		.ss_util_pct = 95,
-		.em_max_util_pct = 97,
-		.ss_iobusy_conv = 100,
+	.algo_param	= {
+		.disable_pc_threshold		= 0,
+		.em_win_size_min_us		= 100000,
+		.em_win_size_max_us		= 300000,
+		.em_max_util_pct		= 97,
+		.group_id			= 0,
+		.max_freq_chg_time_us		= 100000,
+		.slack_mode_dynamic		= 0,
+		.slack_weight_thresh_pct	= 0,
+		.slack_time_min_us		= 39000,
+		.slack_time_max_us		= 39000,
+		.ss_win_size_min_us		= 1000000,
+		.ss_win_size_max_us		= 1000000,
+		.ss_util_pct			= 95,
+		.ss_no_corr_below_freq		= 0,
 	},
+	.energy_coeffs	= {
+		.active_coeff_a		= 2492,
+		.active_coeff_b		= 0,
+		.active_coeff_c		= 0,
+
+		.leakage_coeff_a	= -17720,
+		.leakage_coeff_b	= 37,
+		.leakage_coeff_c	= 2729,
+		.leakage_coeff_d	= -277,
+	},
+	.power_param	= {
+		.current_temp	= 25,
+		.num_freq	= ARRAY_SIZE(grp3d_freq),
+	}
 };
 
 static struct msm_dcvs_core_info grp2d_core_info = {
-	.freq_tbl = &grp2d_freq[0],
-	.core_param = {
-		.max_time_us = 100000,
-		.num_freq = ARRAY_SIZE(grp2d_freq),
+	.freq_tbl	= &grp2d_freq[0],
+	.core_param	= {
+		.core_type	= MSM_DCVS_CORE_TYPE_GPU,
 	},
-	.algo_param = {
-		.slack_time_us = 39000,
-		.disable_pc_threshold = 90000,
-		.ss_window_size = 1000000,
-		.ss_util_pct = 90,
-		.em_max_util_pct = 95,
+	.algo_param	= {
+		.disable_pc_threshold		= 0,
+		.em_win_size_min_us		= 100000,
+		.em_win_size_max_us		= 300000,
+		.em_max_util_pct		= 97,
+		.group_id			= 0,
+		.max_freq_chg_time_us		= 100000,
+		.slack_mode_dynamic		= 0,
+		.slack_weight_thresh_pct	= 0,
+		.slack_time_min_us		= 39000,
+		.slack_time_max_us		= 39000,
+		.ss_win_size_min_us		= 1000000,
+		.ss_win_size_max_us		= 1000000,
+		.ss_util_pct			= 95,
+		.ss_no_corr_below_freq		= 0,
 	},
+	.energy_coeffs	= {
+		.active_coeff_a		= 2492,
+		.active_coeff_b		= 0,
+		.active_coeff_c		= 0,
+
+		.leakage_coeff_a	= -17720,
+		.leakage_coeff_b	= 37,
+		.leakage_coeff_c	= 2729,
+		.leakage_coeff_d	= -277,
+	},
+	.power_param	= {
+		.current_temp	= 25,
+		.num_freq	= ARRAY_SIZE(grp2d_freq),
+	}
 };
 
 #ifdef CONFIG_MSM_BUS_SCALING
@@ -3866,40 +3909,57 @@
 	},
 };
 
-static struct msm_dcvs_freq_entry msm8960_freq[] = {
-	{ 384000, 166981,  345600},
-	{ 702000, 213049,  632502},
-	{1026000, 285712,  925613},
-	{1242000, 383945, 1176550},
-	{1458000, 419729, 1465478},
-	{1512000, 434116, 1546674},
+static struct msm_dcvs_core_info msm8960_core_info = {
+	.num_cores		= 4,
+	.sensors		= (int[]){7, 8, 9, 10},
+	.thermal_poll_ms	= 60000,
+	.core_param		= {
+		.core_type	= MSM_DCVS_CORE_TYPE_CPU,
+	},
+	.algo_param		= {
+		.disable_pc_threshold		= 1458000,
+		.em_win_size_min_us		= 100000,
+		.em_win_size_max_us		= 300000,
+		.em_max_util_pct		= 97,
+		.group_id			= 1,
+		.max_freq_chg_time_us		= 100000,
+		.slack_mode_dynamic		= 0,
+		.slack_weight_thresh_pct	= 3,
+		.slack_time_min_us		= 45000,
+		.slack_time_max_us		= 45000,
+		.ss_no_corr_below_freq		= 0,
+		.ss_win_size_min_us		= 1000000,
+		.ss_win_size_max_us		= 1000000,
+		.ss_util_pct			= 95,
+	},
+	.energy_coeffs		= {
+		.active_coeff_a		= 336,
+		.active_coeff_b		= 0,
+		.active_coeff_c		= 0,
 
+		.leakage_coeff_a	= -17720,
+		.leakage_coeff_b	= 37,
+		.leakage_coeff_c	= 3329,
+		.leakage_coeff_d	= -277,
+	},
+	.power_param		= {
+		.current_temp	= 25,
+		.num_freq	= 0, /* set at runtime */
+	}
 };
 
-static struct msm_dcvs_core_info msm8960_core_info = {
-	.freq_tbl = &msm8960_freq[0],
-	.core_param = {
-		.max_time_us = 100000,
-		.num_freq = ARRAY_SIZE(msm8960_freq),
-	},
-	.algo_param = {
-		.slack_time_us = 58000,
-		.scale_slack_time = 0,
-		.scale_slack_time_pct = 0,
-		.disable_pc_threshold = 1458000,
-		.em_window_size = 100000,
-		.em_max_util_pct = 97,
-		.ss_window_size = 1000000,
-		.ss_util_pct = 95,
-		.ss_iobusy_conv = 100,
-	},
+#define MSM8960_LPM_LATENCY  1000 /* >100 usec for WFI */
+
+static struct msm_gov_platform_data gov_platform_data = {
+	.info = &msm8960_core_info,
+	.latency = MSM8960_LPM_LATENCY,
 };
 
 struct platform_device msm8960_msm_gov_device = {
 	.name = "msm_dcvs_gov",
 	.id = -1,
 	.dev = {
-		.platform_data = &msm8960_core_info,
+		.platform_data = &gov_platform_data,
 	},
 };
 
diff --git a/arch/arm/mach-msm/event_timer.c b/arch/arm/mach-msm/event_timer.c
new file mode 100644
index 0000000..9f46f68
--- /dev/null
+++ b/arch/arm/mach-msm/event_timer.c
@@ -0,0 +1,313 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <mach/event_timer.h>
+
+#define __INIT_HEAD(x)	{ .head = RB_ROOT,\
+			.next = NULL, }
+
+#define DEFINE_TIME_HEAD(x) struct timerqueue_head x = __INIT_HEAD(x)
+
+/**
+ * struct event_timer_info - basic event timer structure
+ * @node: timerqueue node to track time ordered data structure
+ *        of event timers
+ * @function : callback function for event timer.
+ * @data : callback data for event timer.
+ */
+struct event_timer_info {
+	struct timerqueue_node node;
+	void (*function)(void *);
+	void *data;
+};
+
+
+static DEFINE_TIME_HEAD(timer_head);
+static DEFINE_SPINLOCK(event_timer_lock);
+static struct hrtimer event_hrtimer;
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer);
+
+static int msm_event_debug_mask;
+module_param_named(
+	debug_mask, msm_event_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+enum {
+	MSM_EVENT_TIMER_DEBUG = 1U << 0,
+};
+
+
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ *                     by clients once. Returns a handle to be used
+ *                     for future transactions.
+ * @function : The callback function will be called when event
+ *             timer expires.
+ * @data: callback data provided by client.
+ */
+struct event_timer_info *add_event_timer(void (*function)(void *), void *data)
+{
+	struct event_timer_info *event_info =
+			kzalloc(sizeof(struct event_timer_info), GFP_KERNEL);
+
+	if (!event_info)
+		return NULL;
+
+	event_info->function = function;
+	event_info->data = data;
+	/* Init rb node and hr timer */
+	timerqueue_init(&event_info->node);
+
+	return event_info;
+}
+
+/**
+ * is_event_next(): Helper function to check if the event is the next
+ *                  expiring event.
+ * @event : handle to the event to be checked.
+ */
+static bool is_event_next(struct event_timer_info *event)
+{
+	struct event_timer_info *next_event;
+	struct timerqueue_node *next;
+	bool ret = false;
+
+	next = timerqueue_getnext(&timer_head);
+	if (!next)
+		goto exit_is_next_event;
+
+	next_event = container_of(next, struct event_timer_info, node);
+	if (!next_event)
+		goto exit_is_next_event;
+
+	if (next_event == event)
+		ret = true;
+
+exit_is_next_event:
+	return ret;
+}
+
+/**
+ * is_event_active(): Helper function to check if the timer for a given event
+ *                    has been started.
+ * @event : handle to the event to be checked.
+ */
+static bool is_event_active(struct event_timer_info *event)
+{
+	struct timerqueue_node *next;
+	struct event_timer_info *next_event;
+	bool ret = false;
+
+	for (next = timerqueue_getnext(&timer_head); next;
+			next = timerqueue_iterate_next(next)) {
+		next_event = container_of(next, struct event_timer_info, node);
+
+		if (event == next_event) {
+			ret = true;
+			break;
+		}
+	}
+	return ret;
+}
+
+/**
+ * create_hrtimer(): Helper function to set up the hrtimer.
+ */
+static void create_hrtimer(ktime_t expires)
+{
+	static bool timer_initialized;
+
+	if (!timer_initialized) {
+		hrtimer_init(&event_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+		timer_initialized = true;
+	}
+
+	event_hrtimer.function = event_hrtimer_cb;
+	hrtimer_start(&event_hrtimer, expires, HRTIMER_MODE_ABS);
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Setting timer for %lu\n", __func__,
+			(unsigned long)ktime_to_ns(expires));
+}
+
+/**
+ * event_hrtimer_cb() : Callback function for hr timer.
+ *                      Make the client CB from here and remove the event
+ *                      from the time ordered queue.
+ */
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer)
+{
+	struct event_timer_info *event;
+	struct timerqueue_node *next;
+
+	next = timerqueue_getnext(&timer_head);
+
+	while (next && (ktime_to_ns(next->expires)
+		<= ktime_to_ns(hrtimer->node.expires))) {
+		if (!next)
+			goto hrtimer_cb_exit;
+
+		event = container_of(next, struct event_timer_info, node);
+		if (!event)
+			goto hrtimer_cb_exit;
+
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_info("%s: Deleting event @ %lu\n", __func__,
+			(unsigned long)ktime_to_ns(next->expires));
+
+		timerqueue_del(&timer_head, &event->node);
+
+		if (event->function)
+			event->function(event->data);
+		next = timerqueue_getnext(&timer_head);
+	}
+
+	if (next)
+		create_hrtimer(next->expires);
+
+hrtimer_cb_exit:
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * create_timer_smp(): Helper function used to set up the timer on core 0.
+ */
+static void create_timer_smp(void *data)
+{
+	unsigned long flags;
+	struct event_timer_info *event =
+		(struct event_timer_info *)data;
+	struct timerqueue_node *next;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event))
+		timerqueue_del(&timer_head, &event->node);
+
+	next = timerqueue_getnext(&timer_head);
+	timerqueue_add(&timer_head, &event->node);
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	if (!next ||
+		(next && (ktime_to_ns(event->node.expires) <
+				ktime_to_ns(next->expires)))) {
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_info("%s: Setting timer for %lu\n", __func__,
+			(unsigned long)ktime_to_ns(event->node.expires));
+		create_hrtimer(event->node.expires);
+	}
+}
+
+/**
+ *  setup_event_hrtimer() : Helper function to set up the event timer on the
+ *                          primary core.
+ *  @event: event handle causing the wakeup.
+ */
+static void setup_event_hrtimer(struct event_timer_info *event)
+{
+	smp_call_function_single(0, create_timer_smp, event, 1);
+}
+
+/**
+ * activate_event_timer() : Set the expiration time for an event in absolute
+ *                           ktime. This is a one-shot event timer; clients
+ *                           should call this again to set another expiration.
+ *  @event : event handle.
+ *  @event_time : event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time)
+{
+	if (!event)
+		return;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Adding event timer @ %lu\n", __func__,
+				(unsigned long)ktime_to_us(event_time));
+
+	event->node.expires = event_time;
+	/* Start hr timer and add event to rb tree */
+	setup_event_hrtimer(event);
+}
+
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer; this removes the
+ *                            event from the time ordered queue of event timers.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Deactivate timer\n", __func__);
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&event_hrtimer);
+
+		timerqueue_del(&timer_head, &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ *                         add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&event_hrtimer);
+
+		timerqueue_del(&timer_head, &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	kfree(event);
+}
+
+/**
+ * get_next_event_time() - Get the next wakeup event. Returns
+ *                          a ktime value of the next expiring event.
+ */
+ktime_t get_next_event_time(void)
+{
+	unsigned long flags;
+	struct timerqueue_node *next;
+	ktime_t next_event = ns_to_ktime(0);
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	next = timerqueue_getnext(&timer_head);
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	if (!next)
+		return next_event;
+
+	next_event = hrtimer_get_remaining(&event_hrtimer);
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Next Event %lu\n", __func__,
+			(unsigned long)ktime_to_us(next_event));
+
+	return next_event;
+}
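
The expected client flow for the new API: allocate a handle once with add_event_timer(), arm a one-shot wakeup with activate_event_timer() using an absolute CLOCK_MONOTONIC ktime (the backing hrtimer is initialized with CLOCK_MONOTONIC/HRTIMER_MODE_ABS), and release the handle with destroy_event_timer(). A minimal sketch; the callback and client names are illustrative:

	static void my_wakeup_cb(void *data)
	{
		pr_info("event timer fired for %s\n", (char *)data);
	}

	static struct event_timer_info *ev;

	static int my_client_init(void)
	{
		ev = add_event_timer(my_wakeup_cb, "my_client");
		if (!ev)
			return -ENOMEM;
		/* one-shot wakeup 100 ms from now, in absolute ktime */
		activate_event_timer(ev,
				ktime_add_ns(ktime_get(), 100 * NSEC_PER_MSEC));
		return 0;
	}

	static void my_client_exit(void)
	{
		deactivate_event_timer(ev);
		destroy_event_timer(ev);
	}
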
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index b7b287d..101e65a 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -617,6 +617,7 @@
 struct msm_fb_platform_data {
 	int (*detect_client)(const char *name);
 	int mddi_prescan;
+	unsigned char ext_resolution;
 	int (*allow_set_offset)(void);
 	char prim_panel_name[PANEL_NAME_MAX_LEN];
 	char ext_panel_name[PANEL_NAME_MAX_LEN];
diff --git a/arch/arm/mach-msm/include/mach/event_timer.h b/arch/arm/mach-msm/include/mach/event_timer.h
new file mode 100644
index 0000000..7a00b23
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/event_timer.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+#define __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct event_timer_info;
+
+#ifdef CONFIG_MSM_EVENT_TIMER
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ *                     by clients once. Returns a handle to be used
+ *                     for future transactions.
+ * @function : The callback function will be called when event
+ *             timer expires.
+ * @data : Callback data provided by client.
+ */
+struct event_timer_info *add_event_timer(void (*function)(void *), void *data);
+
+/** activate_event_timer() : Set the expiration time for an event in absolute
+ *                           ktime. This is a one-shot event timer; clients
+ *                           should call this again to set another expiration.
+ *  @event : Event handle.
+ *  @event_time : Event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time);
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event);
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ * add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event);
+
+/**
+ * get_next_event_time() : Get the next wakeup event.
+ *                          returns a ktime value of the next
+ *                          expiring event.
+ */
+ktime_t get_next_event_time(void);
+#else
+static inline void *add_event_timer(void (*function)(void *), void *data)
+{
+	return NULL;
+}
+
+static inline void activate_event_timer(void *event, ktime_t event_time) {}
+
+static inline void deactivate_event_timer(void *event) {}
+
+static inline void destroy_event_timer(void *event) {}
+
+static inline ktime_t get_next_event_time(void)
+{
+	return ns_to_ktime(0);
+}
+
+#endif /* CONFIG_MSM_EVENT_TIMER */
+#endif /* __ARCH_ARM_MACH_MSM_EVENT_TIMER_H */
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index ecf53f8..cafdc9a 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -67,6 +67,7 @@
 int platform_physical_remove_pages(u64, u64);
 int platform_physical_active_pages(u64, u64);
 int platform_physical_low_power_pages(u64, u64);
+int msm_get_memory_type_from_name(const char *memtype_name);
 
 extern int (*change_memory_power)(u64, u64, int);
 
@@ -107,6 +108,23 @@
 	(virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET)
 #endif
 
+/*
+ * Need a temporary unique variable that no one will ever see to
+ * hold the compat string. Line number gives this easily.
+ * Need another layer of indirection to get __LINE__ to expand
+ * properly as opposed to appending and ending up with
+ * __compat___LINE__
+ */
+#define __CONCAT(a, b)	___CONCAT(a, b)
+#define ___CONCAT(a, b)	a ## b
+
+#define EXPORT_COMPAT(com)	\
+static char *__CONCAT(__compat_, __LINE__)  __used \
+	__attribute((__section__(".exportcompat.init"))) = com
+
+extern char *__compat_exports_start[];
+extern char *__compat_exports_end[];
+
 #endif
 
 #if defined CONFIG_ARCH_MSM_SCORPION || defined CONFIG_ARCH_MSM_KRAIT
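
The __CONCAT/___CONCAT double expansion above is the standard trick to make __LINE__ expand before token pasting. As a worked example, a hypothetical EXPORT_COMPAT("qcom,msm-contig-mem") appearing on line 42 of a board file expands to roughly:

	static char *__compat_42 __used
		__attribute((__section__(".exportcompat.init"))) = "qcom,msm-contig-mem";

giving each use a unique variable, all collected into the .exportcompat.init section bounded by __compat_exports_start and __compat_exports_end.
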
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs.h b/arch/arm/mach-msm/include/mach/msm_dcvs.h
index fc2ffe9..2ad7d22 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,12 @@
 #define CORE_NAME_MAX (32)
 #define CORES_MAX (10)
 
+#define CPU_OFFSET	1  /* used when notifying TZ of the core number */
+#define GPU_OFFSET (CORES_MAX * 2/3)  /* There will be more CPUs than GPUs;
+				       * assign the GPUs fewer core elements
+				       * and start them later.
+				       */
+
 enum msm_core_idle_state {
 	MSM_DCVS_IDLE_ENTER,
 	MSM_DCVS_IDLE_EXIT,
@@ -30,39 +36,142 @@
 	MSM_DCVS_DISABLE_HIGH_LATENCY_MODES,
 };
 
-struct msm_dcvs_idle {
-	const char *core_name;
-	
-	int (*enable)(struct msm_dcvs_idle *self,
-			enum msm_core_control_event event);
+struct msm_dcvs_sync_rule {
+	unsigned long cpu_khz;
+	unsigned long gpu_floor_khz;
 };
 
-extern int msm_dcvs_idle_source_register(struct msm_dcvs_idle *drv);
+struct msm_dcvs_platform_data {
+	struct msm_dcvs_sync_rule *sync_rules;
+	unsigned num_sync_rules;
+	unsigned long gpu_max_nom_khz;
+};
 
-extern int msm_dcvs_idle_source_unregister(struct msm_dcvs_idle *drv);
+struct msm_gov_platform_data {
+	struct msm_dcvs_core_info *info;
+	int latency;
+};
 
-int msm_dcvs_idle(int handle, enum msm_core_idle_state state,
+/**
+ * msm_dcvs_register_cpu_freq
+ * @freq: the frequency value to register
+ * @voltage: the operating voltage (in mV) associated with the above frequency
+ *
+ * Register a cpu frequency and its operating voltage with dcvs.
+ */
+#ifdef CONFIG_MSM_DCVS
+void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage);
+#else
+static inline void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage)
+{}
+#endif
+
+/**
+ * msm_dcvs_idle
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ * @state: The enter/exit idle state the core is in
+ * @iowaited: iowait in us on MSM_DCVS_IDLE_EXIT
+ * @return:
+ *	0 on success,
+ *	-ENOSYS,
+ *	-EINVAL,
+ *	SCM return values
+ *
+ * Send idle state notifications to the msm_dcvs driver
+ */
+int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
 		uint32_t iowaited);
 
+/**
+ * struct msm_dcvs_core_info
+ *
+ * Core specific information used by algorithm. Need to provide this
+ * before the sink driver can be registered.
+ */
 struct msm_dcvs_core_info {
-	struct msm_dcvs_freq_entry *freq_tbl;
-	struct msm_dcvs_core_param core_param;
-	struct msm_dcvs_algo_param algo_param;
+	int					num_cores;
+	int					*sensors;
+	int					thermal_poll_ms;
+	struct msm_dcvs_freq_entry		*freq_tbl;
+	struct msm_dcvs_core_param		core_param;
+	struct msm_dcvs_algo_param		algo_param;
+	struct msm_dcvs_energy_curve_coeffs	energy_coeffs;
+	struct msm_dcvs_power_params		power_param;
 };
 
-extern int msm_dcvs_register_core(const char *core_name, uint32_t group_id,
-		struct msm_dcvs_core_info *info);
+/**
+ * msm_dcvs_register_core
+ * @type: whether this is a CPU or a GPU
+ * @type_core_num: The number of the core for a type
+ * @info: The core specific algorithm parameters.
+ * @sensor: The thermal sensor number of the core in question
+ * @return :
+ *	0 on success,
+ *	-ENOSYS,
+ *	-ENOMEM
+ *
+ * Register the core with msm_dcvs driver. Done once at init before calling
+ * msm_dcvs_freq_sink_register
+ * Cores that need to run synchronously must share the same group id.
+ */
+extern int msm_dcvs_register_core(
+	enum msm_dcvs_core_type type,
+	int type_core_num,
+	struct msm_dcvs_core_info *info,
+	int (*set_frequency)(int type_core_num, unsigned int freq),
+	unsigned int (*get_frequency)(int type_core_num),
+	int (*idle_enable)(int type_core_num,
+				enum msm_core_control_event event),
+	int (*set_floor_frequency)(int type_core_num, unsigned int freq),
+	int sensor);
 
-struct msm_dcvs_freq {
-	const char *core_name;
-	
-	int (*set_frequency)(struct msm_dcvs_freq *self,
-			unsigned int freq);
-	unsigned int (*get_frequency)(struct msm_dcvs_freq *self);
-};
+/**
+ * msm_dcvs_freq_sink_start
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ * @return: Handle unique to the core.
+ *
+ * Register the clock driver code with the msm_dcvs driver to get notified about
+ * frequency change requests.
+ */
+extern int msm_dcvs_freq_sink_start(int dcvs_core_id);
 
-extern int msm_dcvs_freq_sink_register(struct msm_dcvs_freq *drv);
+/**
+ * msm_dcvs_freq_sink_stop
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ * @return:
+ *	0 on success,
+ *	-EINVAL
+ *
+ * Unregister the sink driver for the core. This will cause the source driver
+ * for the core to stop sending idle pulses.
+ */
+extern int msm_dcvs_freq_sink_stop(int dcvs_core_id);
 
-extern int msm_dcvs_freq_sink_unregister(struct msm_dcvs_freq *drv);
+/**
+ * msm_dcvs_update_limits
+ * @dcvs_core_id: The id returned by msm_dcvs_register_core
+ *
+ * Update the frequency known to dcvs when the limits are changed.
+ */
+extern void msm_dcvs_update_limits(int dcvs_core_id);
 
+/**
+ * msm_dcvs_apply_gpu_floor
+ * @cpu_freq: CPU frequency to compare to GPU sync rules
+ *
+ * Apply a GPU floor frequency if the corresponding CPU frequency,
+ * or the number of CPUs online, requires it.
+ */
+extern void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq);
+
+/**
+ * msm_dcvs_update_algo_params
+ * @return:
+ *      0 on success, < 0 on error
+ *
+ * Updates the DCVS algorithm with parameters depending on the
+ * number of CPUs online.
+ */
+extern int msm_dcvs_update_algo_params(void);
 #endif
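
To illustrate the reworked registration flow: register a core with its callbacks at init, then start the sink once the clock driver can honor frequency requests. The callbacks below are empty stubs, the sensor number 7 is illustrative (it matches the first entry of the sensors array used in devices-8960.c), a NULL floor callback is assumed to be accepted for CPUs, and the sketch assumes the value msm_dcvs_register_core() returns on success is the dcvs_core_id that msm_dcvs_freq_sink_start() expects (the @return doc above only lists 0/-ENOSYS/-ENOMEM, so treat that as an assumption):

	static int cpu0_set_freq(int core_num, unsigned int freq)
	{
		return 0;	/* stub: forward the request to the clock driver */
	}

	static unsigned int cpu0_get_freq(int core_num)
	{
		return 0;	/* stub: query the clock driver */
	}

	static int cpu0_idle_enable(int core_num,
			enum msm_core_control_event event)
	{
		return 0;	/* stub */
	}

	static int cpu0_dcvs_init(struct msm_dcvs_core_info *info)
	{
		int id = msm_dcvs_register_core(MSM_DCVS_CORE_TYPE_CPU, 0, info,
				cpu0_set_freq, cpu0_get_freq, cpu0_idle_enable,
				NULL /* no floor callback */, 7);
		if (id < 0)
			return id;
		return msm_dcvs_freq_sink_start(id);
	}
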
diff --git a/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h b/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h
index a2566ba..7eefd54 100644
--- a/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h
+++ b/arch/arm/mach-msm/include/mach/msm_dcvs_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,51 +13,222 @@
 #ifndef _ARCH_ARM_MACH_MSM_MSM_DCVS_SCM_H
 #define _ARCH_ARM_MACH_MSM_MSM_DCVS_SCM_H
 
+enum msm_dcvs_core_type {
+	MSM_DCVS_CORE_TYPE_CPU = 0,
+	MSM_DCVS_CORE_TYPE_GPU = 1,
+};
+
+enum msm_dcvs_algo_param_type {
+	MSM_DCVS_ALGO_DCVS_PARAM = 0,
+	MSM_DCVS_ALGO_MPD_PARAM  = 1,
+};
+
 enum msm_dcvs_scm_event {
-	MSM_DCVS_SCM_IDLE_ENTER,
-	MSM_DCVS_SCM_IDLE_EXIT,
-	MSM_DCVS_SCM_QOS_TIMER_EXPIRED,
-	MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
-	MSM_DCVS_SCM_ENABLE_CORE,
-	MSM_DCVS_SCM_RESET_CORE,
+	MSM_DCVS_SCM_IDLE_ENTER = 0, /* Core enters idle */
+	MSM_DCVS_SCM_IDLE_EXIT = 1, /* Core exits idle */
+	MSM_DCVS_SCM_QOS_TIMER_EXPIRED = 2, /* Core slack timer expired */
+	MSM_DCVS_SCM_CLOCK_FREQ_UPDATE = 3, /* Core freq change complete */
+	MSM_DCVS_SCM_CORE_ONLINE = 4, /* Core is online */
+	MSM_DCVS_SCM_CORE_OFFLINE = 5, /* Core is offline */
+	MSM_DCVS_SCM_CORE_UNAVAILABLE = 6, /* Core is offline + unavailable */
+	MSM_DCVS_SCM_DCVS_ENABLE = 7, /* DCVS is enabled/disabled for core */
+	MSM_DCVS_SCM_MPD_ENABLE = 8, /* Enable/disable MP Decision */
+	MSM_DCVS_SCM_RUNQ_UPDATE = 9, /* Update running threads */
+	MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED = 10, /* MPDecision slack timer */
 };
 
 struct msm_dcvs_algo_param {
-	uint32_t slack_time_us;
-	uint32_t scale_slack_time;
-	uint32_t scale_slack_time_pct;
 	uint32_t disable_pc_threshold;
-	uint32_t em_window_size;
+	uint32_t em_win_size_min_us;
+	uint32_t em_win_size_max_us;
 	uint32_t em_max_util_pct;
-	uint32_t ss_window_size;
+	uint32_t group_id;
+	uint32_t max_freq_chg_time_us;
+	uint32_t slack_mode_dynamic;
+	uint32_t slack_time_min_us;
+	uint32_t slack_time_max_us;
+	uint32_t slack_weight_thresh_pct;
+	uint32_t ss_no_corr_below_freq;
+	uint32_t ss_win_size_min_us;
+	uint32_t ss_win_size_max_us;
 	uint32_t ss_util_pct;
-	uint32_t ss_iobusy_conv;
 };
 
 struct msm_dcvs_freq_entry {
-	uint32_t freq; 
-	uint32_t idle_energy;
-	uint32_t active_energy;
+	uint32_t freq;
+	uint32_t voltage;
+	uint32_t is_trans_level;
+	uint32_t active_energy_offset;
+	uint32_t leakage_energy_offset;
+};
+
+struct msm_dcvs_energy_curve_coeffs {
+	int32_t active_coeff_a;
+	int32_t active_coeff_b;
+	int32_t active_coeff_c;
+
+	int32_t leakage_coeff_a;
+	int32_t leakage_coeff_b;
+	int32_t leakage_coeff_c;
+	int32_t leakage_coeff_d;
+};
+
+struct msm_dcvs_power_params {
+	uint32_t current_temp;
+	uint32_t num_freq; /* number of msm_dcvs_freq_entry passed */
 };
 
 struct msm_dcvs_core_param {
-	uint32_t max_time_us;
-	uint32_t num_freq; 
+	uint32_t core_type;
+	uint32_t core_bitmask_id;
 };
 
+struct msm_mpd_algo_param {
+	uint32_t em_win_size_min_us;
+	uint32_t em_win_size_max_us;
+	uint32_t em_max_util_pct;
+	uint32_t mp_em_rounding_point_min;
+	uint32_t mp_em_rounding_point_max;
+	uint32_t online_util_pct_min;
+	uint32_t online_util_pct_max;
+	uint32_t slack_time_min_us;
+	uint32_t slack_time_max_us;
+};
 
 #ifdef CONFIG_MSM_DCVS
+/**
+ * Initialize DCVS algorithm in TrustZone.
+ * Must call before invoking any other DCVS call into TZ.
+ *
+ * @size: Size of buffer in bytes
+ *
+ * @return:
+ *	0 on success.
+ *	-EEXIST: DCVS algorithm already initialized.
+ *	-EINVAL: Invalid args.
+ */
 extern int msm_dcvs_scm_init(size_t size);
 
-extern int msm_dcvs_scm_create_group(uint32_t id);
+/**
+ * Registers cores with the DCVS algo.
+ *
+ * @core_id: The core identifier that will be used for communication with DCVS
+ * @param: The core parameters
+ *
+ * @return:
+ *	0 on success.
+ *	-ENOMEM: Insufficient memory.
+ *	-EINVAL: Invalid args.
+ */
+extern int msm_dcvs_scm_register_core(uint32_t core_id,
+		struct msm_dcvs_core_param *param);
 
-extern int msm_dcvs_scm_register_core(uint32_t core_id, uint32_t group_id,
-		struct msm_dcvs_core_param *param,
-		struct msm_dcvs_freq_entry *freq);
-
+/**
+ * Set DCVS algorithm parameters
+ *
+ * @core_id: The algorithm parameters specific for the core
+ * @param: The param data structure
+ *
+ * @return:
+ *	0 on success.
+ *	-EINVAL: Invalid args.
+ */
 extern int msm_dcvs_scm_set_algo_params(uint32_t core_id,
 		struct msm_dcvs_algo_param *param);
 
+/**
+ * Set MPDecision algorithm parameters
+ *
+ * @param: The param data structure
+ *	0 on success.
+ *	-EINVAL: Invalid args.
+ */
+extern int msm_mpd_scm_set_algo_params(struct msm_mpd_algo_param *param);
+
+/**
+ * Set frequency and power characteristics for the core.
+ *
+ * @param core_id: The core identifier that will be used to interface with the
+ *                 DCVS algo.
+ * @param pwr_param: power params
+ * @param freq_entry: frequency characteristics desired
+ * @param coeffs: Coefficients that will describe the power curve
+ *
+ * @return int
+ *	0 on success.
+ *	-EINVAL: Invalid args.
+ */
+extern int msm_dcvs_scm_set_power_params(uint32_t core_id,
+				struct msm_dcvs_power_params *pwr_param,
+				struct msm_dcvs_freq_entry *freq_entry,
+				struct msm_dcvs_energy_curve_coeffs *coeffs);
+
+/**
+ * Do an SCM call.
+ *
+ * @core_id: The core identifier.
+ * @event_id: The event that occurred.
+ *	Possible values:
+ *	MSM_DCVS_SCM_IDLE_ENTER
+ *		@param0: unused
+ *		@param1: unused
+ *		@ret0: unused
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_IDLE_EXIT
+ *		@param0: Did the core iowait
+ *		@param1: unused
+ *		@ret0: New clock frequency for the core in KHz
+ *		@ret1: New QoS timer value for the core in usec
+ *	MSM_DCVS_SCM_QOS_TIMER_EXPIRED
+ *		@param0: unused
+ *		@param1: unused
+ *		@ret0: New clock frequency for the core in KHz
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_CLOCK_FREQ_UPDATE
+ *		@param0: active clock frequency of the core in KHz
+ *		@param1: time taken in usec to switch to the frequency
+ *		@ret0: New QoS timer value for the core in usec
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_CORE_ONLINE
+ *		@param0: active clock frequency of the core in KHz
+ *		@param1: time taken to online the core
+ *		@ret0: unused
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_CORE_OFFLINE
+ *		@param0: time taken to offline the core
+ *		@param1: unused
+ *		@ret0: unused
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_CORE_UNAVAILABLE
+ *		@param0: TODO:bitmask
+ *		@param1: unused
+ *		@ret0: Bitmask of cores to bring online/offline.
+ *		@ret1: Mp Decision slack time. Common to all cores.
+ *	MSM_DCVS_SCM_DCVS_ENABLE
+ *		@param0: 1 to enable; 0 to disable DCVS
+ *		@param1: unused
+ *		@ret0: New clock frequency for the core in KHz
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_MPD_ENABLE
+ *		@param0: 1 to enable; 0 to disable MP Decision
+ *		@param1: unused
+ *		@ret0: unused
+ *		@ret1: unused
+ *	MSM_DCVS_SCM_RUNQ_UPDATE
+ *		@param0: run q value
+ *		@param1: unused
+ *		@ret0: Bitmask of cores online
+ *		@ret1: New QoS timer for MP Decision (usec)
+ *	MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
+ *		@param0: unused
+ *		@param1: unused
+ *		@ret0: Bitmask of cores online
+ *		@ret1: New QoS timer for MP Decision (usec)
+ *	@return:
+ *		0 on success,
+ *		SCM return values
+ */
 extern int msm_dcvs_scm_event(uint32_t core_id,
 		enum msm_dcvs_scm_event event_id,
 		uint32_t param0, uint32_t param1,
@@ -66,16 +237,21 @@
 #else
 static inline int msm_dcvs_scm_init(uint32_t phy, size_t bytes)
 { return -ENOSYS; }
-static inline int msm_dcvs_scm_create_group(uint32_t id)
-{ return -ENOSYS; }
 static inline int msm_dcvs_scm_register_core(uint32_t core_id,
-		uint32_t group_id,
 		struct msm_dcvs_core_param *param,
 		struct msm_dcvs_freq_entry *freq)
 { return -ENOSYS; }
 static inline int msm_dcvs_scm_set_algo_params(uint32_t core_id,
 		struct msm_dcvs_algo_param *param)
 { return -ENOSYS; }
+static inline int msm_mpd_scm_set_algo_params(
+		struct msm_mpd_algo_param *param)
+{ return -ENOSYS; }
+static inline int msm_dcvs_scm_set_power_params(uint32_t core_id,
+		struct msm_dcvs_power_params *pwr_param,
+		struct msm_dcvs_freq_entry *freq_entry,
+		struct msm_dcvs_energy_curve_coeffs *coeffs)
+{ return -ENOSYS; }
 static inline int msm_dcvs_scm_event(uint32_t core_id,
 		enum msm_dcvs_scm_event event_id,
 		uint32_t param0, uint32_t param1,
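
To make the event table above concrete, an idle-exit notification would look roughly like this; the core id and iowait flag are illustrative, and the two trailing output arguments are assumed to be uint32_t pointers matching the @ret0/@ret1 documentation:

	uint32_t new_freq_khz = 0, new_slack_us = 0;
	uint32_t core_id = 1;	/* illustrative */
	int ret;

	ret = msm_dcvs_scm_event(core_id, MSM_DCVS_SCM_IDLE_EXIT,
			1 /* iowaited */, 0 /* unused */,
			&new_freq_khz, &new_slack_us);
	/* on success: new_freq_khz is the new core frequency in KHz,
	 * new_slack_us is the new QoS timer value in usec */
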
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
new file mode 100644
index 0000000..753443b
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_OCMEM_H
+#define _ARCH_ARM_MACH_MSM_OCMEM_H
+
+#include <asm/page.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#define OCMEM_MIN_ALLOC SZ_64K
+#define OCMEM_MIN_ALIGN SZ_64K
+
+/* Maximum number of slots in DM */
+#define OCMEM_MAX_CHUNKS 32
+#define MIN_CHUNK_SIZE 128
+
+struct ocmem_notifier;
+
+struct ocmem_buf {
+	unsigned long addr;
+	unsigned long len;
+};
+
+struct ocmem_buf_attr {
+	unsigned long paddr;
+	unsigned long len;
+};
+
+struct ocmem_chunk {
+	bool ro;
+	unsigned long ddr_paddr;
+	unsigned long size;
+};
+
+struct ocmem_map_list {
+	unsigned num_chunks;
+	struct ocmem_chunk chunks[OCMEM_MAX_CHUNKS];
+};
+
+enum ocmem_power_state {
+	OCMEM_OFF = 0x0,
+	OCMEM_RETENTION,
+	OCMEM_ON,
+	OCMEM_MAX = OCMEM_ON,
+};
+
+struct ocmem_resource {
+	unsigned resource_id;
+	unsigned num_keys;
+	unsigned int *keys;
+};
+
+struct ocmem_vectors {
+	unsigned num_resources;
+	struct ocmem_resource *r;
+};
+
+/* List of clients that allocate/interact with OCMEM */
+/* Must be in sync with client_names */
+enum ocmem_client {
+	/* GMEM clients */
+	OCMEM_GRAPHICS = 0x0,
+	/* TCMEM clients */
+	OCMEM_VIDEO,
+	OCMEM_CAMERA,
+	/* Dummy Clients */
+	OCMEM_HP_AUDIO,
+	OCMEM_VOICE,
+	/* IMEM Clients */
+	OCMEM_LP_AUDIO,
+	OCMEM_SENSORS,
+	OCMEM_OTHER_OS,
+	OCMEM_CLIENT_MAX,
+};
+
+/**
+ * List of OCMEM notification events which will be broadcasted
+ * to clients that optionally register for these notifications
+ * on a per allocation basis.
+ **/
+enum ocmem_notif_type {
+	OCMEM_MAP_DONE = 1,
+	OCMEM_MAP_FAIL,
+	OCMEM_UNMAP_DONE,
+	OCMEM_UNMAP_FAIL,
+	OCMEM_ALLOC_GROW,
+	OCMEM_ALLOC_SHRINK,
+	OCMEM_NOTIF_TYPE_COUNT,
+};
+
+/* APIS */
+/* Notification APIs */
+struct ocmem_notifier *ocmem_notifier_register(int client_id,
+						struct notifier_block *nb);
+
+int ocmem_notifier_unregister(struct ocmem_notifier *notif_hndl,
+				struct notifier_block *nb);
+
+/* Obtain the maximum quota for the client */
+unsigned long get_max_quota(int client_id);
+
+/* Allocation APIs */
+struct ocmem_buf *ocmem_allocate(int client_id, unsigned long size);
+
+struct ocmem_buf *ocmem_allocate_nowait(int client_id, unsigned long size);
+
+struct ocmem_buf *ocmem_allocate_nb(int client_id, unsigned long size);
+
+struct ocmem_buf *ocmem_allocate_range(int client_id, unsigned long min,
+			unsigned long goal, unsigned long step);
+
+/* Free APIs */
+int ocmem_free(int client_id, struct ocmem_buf *buf);
+
+/* Dynamic Resize APIs */
+int ocmem_shrink(int client_id, struct ocmem_buf *buf,
+			unsigned long new_size);
+
+/* Transfer APIs */
+int ocmem_map(int client_id, struct ocmem_buf *buffer,
+			struct ocmem_map_list *list);
+
+
+int ocmem_unmap(int client_id, struct ocmem_buf *buffer,
+			struct ocmem_map_list *list);
+
+/* Priority Enforcement APIs */
+int ocmem_evict(int client_id);
+
+int ocmem_restore(int client_id);
+
+/* Power Control APIs */
+int ocmem_set_power_state(int client_id, struct ocmem_buf *buf,
+				enum ocmem_power_state new_state);
+
+enum ocmem_power_state ocmem_get_power_state(int client_id,
+				struct ocmem_buf *buf);
+
+struct ocmem_vectors *ocmem_get_vectors(int client_id,
+						struct ocmem_buf *buf);
+#endif
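
A minimal sketch of the allocation flow exposed above, for the graphics client. It assumes ocmem_allocate() returns NULL on failure; the header does not say, so ERR_PTR-style handling may be required instead:

	static struct ocmem_buf *gmem;

	static int gpu_ocmem_alloc(void)
	{
		gmem = ocmem_allocate(OCMEM_GRAPHICS, OCMEM_MIN_ALLOC);
		if (!gmem)
			return -ENOMEM;
		pr_info("ocmem buf: addr=0x%lx len=0x%lx\n",
				gmem->addr, gmem->len);
		return 0;
	}

	static void gpu_ocmem_release(void)
	{
		if (gmem && ocmem_free(OCMEM_GRAPHICS, gmem))
			pr_err("ocmem free failed\n");
		gmem = NULL;
	}
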
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 45edbc5..45e0d5c 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -353,3 +353,24 @@
 {
 	return fmem_set_state(FMEM_T_STATE);
 }
+
+static char * const memtype_names[] = {
+	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
+	[MEMTYPE_SMI]	= "SMI",
+	[MEMTYPE_EBI0] = "EBI0",
+	[MEMTYPE_EBI1] = "EBI1",
+};
+
+int msm_get_memory_type_from_name(const char *memtype_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
+		if (memtype_names[i] &&
+		    strcmp(memtype_name, memtype_names[i]) == 0)
+			return i;
+	}
+
+	pr_err("Could not find memory type %s\n", memtype_name);
+	return -EINVAL;
+}
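
A board file can then resolve a memtype string coming from platform data; for example:

	int mt = msm_get_memory_type_from_name("EBI1");
	/* mt == MEMTYPE_EBI1; -EINVAL (with an error logged) if unknown */
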
diff --git a/arch/arm/mach-msm/msm_dcvs.c b/arch/arm/mach-msm/msm_dcvs.c
index 3dab7a5..9e0be63 100644
--- a/arch/arm/mach-msm/msm_dcvs.c
+++ b/arch/arm/mach-msm/msm_dcvs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/kobject.h>
 #include <linux/ktime.h>
@@ -23,252 +22,522 @@
 #include <linux/spinlock.h>
 #include <linux/stringify.h>
 #include <linux/debugfs.h>
+#include <linux/msm_tsens.h>
+#include <linux/platform_device.h>
 #include <asm/atomic.h>
 #include <asm/page.h>
 #include <mach/msm_dcvs.h>
+#include <trace/events/mpdcvs_trace.h>
 
 #define CORE_HANDLE_OFFSET (0xA0)
 #define __err(f, ...) pr_err("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
 #define __info(f, ...) pr_info("MSM_DCVS: %s: " f, __func__, __VA_ARGS__)
 #define MAX_PENDING	(5)
 
-enum {
-	MSM_DCVS_DEBUG_NOTIFIER    = BIT(0),
-	MSM_DCVS_DEBUG_IDLE_PULSE  = BIT(1),
-	MSM_DCVS_DEBUG_FREQ_CHANGE = BIT(2),
-};
+#define CORE_FLAG_TEMP_UPDATE		0x1
 
 struct core_attribs {
-	struct kobj_attribute idle_enabled;
-	struct kobj_attribute freq_change_enabled;
-	struct kobj_attribute actual_freq;
 	struct kobj_attribute freq_change_us;
 
-	struct kobj_attribute max_time_us;
-
-	struct kobj_attribute slack_time_us;
-	struct kobj_attribute scale_slack_time;
-	struct kobj_attribute scale_slack_time_pct;
 	struct kobj_attribute disable_pc_threshold;
-	struct kobj_attribute em_window_size;
+	struct kobj_attribute em_win_size_min_us;
+	struct kobj_attribute em_win_size_max_us;
 	struct kobj_attribute em_max_util_pct;
-	struct kobj_attribute ss_window_size;
+	struct kobj_attribute group_id;
+	struct kobj_attribute max_freq_chg_time_us;
+	struct kobj_attribute slack_mode_dynamic;
+	struct kobj_attribute slack_time_min_us;
+	struct kobj_attribute slack_time_max_us;
+	struct kobj_attribute slack_weight_thresh_pct;
+	struct kobj_attribute ss_no_corr_below_freq;
+	struct kobj_attribute ss_win_size_min_us;
+	struct kobj_attribute ss_win_size_max_us;
 	struct kobj_attribute ss_util_pct;
-	struct kobj_attribute ss_iobusy_conv;
+
+	struct kobj_attribute active_coeff_a;
+	struct kobj_attribute active_coeff_b;
+	struct kobj_attribute active_coeff_c;
+	struct kobj_attribute leakage_coeff_a;
+	struct kobj_attribute leakage_coeff_b;
+	struct kobj_attribute leakage_coeff_c;
+	struct kobj_attribute leakage_coeff_d;
+
+	struct kobj_attribute thermal_poll_ms;
+
+	struct kobj_attribute freq_tbl;
+	struct kobj_attribute offset_tbl;
 
 	struct attribute_group attrib_group;
 };
 
+enum pending_freq_state {
+	/*
+	 * used by the thread to check if pending_freq was updated while it was
+	 * setting previous frequency - this is written to and used by the
+	 * freq updating thread
+	 */
+	NO_OUTSTANDING_FREQ_CHANGE = 0,
+
+	/*
+	 * This request is set to indicate that the governor is stopped and no
+	 * more frequency change requests are accepted until it starts again.
+	 * This is checked/used by the threads that want to change the freq.
+	 */
+	STOP_FREQ_CHANGE = -1,
+
+	/*
+	 * Any other +ve value means that a freq change was requested and the
+	 * thread has not gotten around to update it
+	 *
+	 * Any other -ve value means that this is the last freq change i.e. a
+	 * freq change was requested but the thread has not run yet and
+	 * meanwhile the governor was stopped.
+	 */
+};
+
 struct dcvs_core {
+	spinlock_t	idle_state_change_lock;
+	/* 0 when not idle (busy), 1 when idle, and -1 when the governor starts
+	 * and we don't know whether the next call is going to be idle enter or
+	 * exit
+	 */
+	int		idle_entered;
+
+	enum msm_dcvs_core_type type;
+	/* the number within each type, for example cpu 0,1,2 and gpu 0,1 */
+	int type_core_num;
 	char core_name[CORE_NAME_MAX];
-	uint32_t new_freq[MAX_PENDING];
 	uint32_t actual_freq;
 	uint32_t freq_change_us;
 
-	uint32_t max_time_us; 
+	uint32_t max_time_us; /* core param */
 
 	struct msm_dcvs_algo_param algo_param;
-	struct msm_dcvs_idle *idle_driver;
-	struct msm_dcvs_freq *freq_driver;
+	struct msm_dcvs_energy_curve_coeffs coeffs;
 
-	
-	int64_t time_start;
-	struct mutex lock;
-	spinlock_t cpu_lock;
+	/* private */
+	ktime_t time_start;
 	struct task_struct *task;
 	struct core_attribs attrib;
-	uint32_t handle;
-	uint32_t group_id;
-	uint32_t freq_pending;
-	struct hrtimer timer;
-	int32_t timer_disabled;
-	
-	int32_t change_freq_activated;
+	uint32_t dcvs_core_id;
+	struct msm_dcvs_core_info *info;
+	int sensor;
+	wait_queue_head_t wait_q;
+
+	int (*set_frequency)(int type_core_num, unsigned int freq);
+	unsigned int (*get_frequency)(int type_core_num);
+	int (*idle_enable)(int type_core_num,
+			enum msm_core_control_event event);
+	int (*set_floor_frequency)(int type_core_num, unsigned int freq);
+
+	spinlock_t	pending_freq_lock;
+	int pending_freq;
+
+	struct hrtimer	slack_timer;
+	struct delayed_work	temperature_work;
+	int flags;
 };
 
-static int msm_dcvs_debug;
 static int msm_dcvs_enabled = 1;
 module_param_named(enable, msm_dcvs_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);
 
-static struct dentry *debugfs_base;
+static struct dentry		*debugfs_base;
 
 static struct dcvs_core core_list[CORES_MAX];
-static DEFINE_MUTEX(core_list_lock);
 
 static struct kobject *cores_kobj;
-static struct dcvs_core *core_handles[CORES_MAX];
+
+#define DCVS_MAX_NUM_FREQS 15
+static struct msm_dcvs_freq_entry cpu_freq_tbl[DCVS_MAX_NUM_FREQS];
+static unsigned num_cpu_freqs;
+static struct msm_dcvs_platform_data *dcvs_pdata;
+
+static DEFINE_MUTEX(param_update_mutex);
+static DEFINE_MUTEX(gpu_floor_mutex);
+
+static void force_stop_slack_timer(struct dcvs_core *core)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+	hrtimer_cancel(&core->slack_timer);
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+}
+
+static void force_start_slack_timer(struct dcvs_core *core, int slack_us)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+
+	/* only start the timer if a non-zero slack period was given */
+	if (slack_us != 0) {
+		ret = hrtimer_start(&core->slack_timer,
+				ktime_set(0, slack_us * 1000),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret) {
+			pr_err("%s Failed to start timer ret = %d\n",
+					core->core_name, ret);
+		}
+	}
+
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+}
+
+static void stop_slack_timer(struct dcvs_core *core)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+	/* error only for CPU cores; GPUs may enter idle consecutively */
+	if (core->idle_entered == 1 && !(core->dcvs_core_id >= GPU_OFFSET))
+		__err("%s trying to reenter idle", core->core_name);
+	core->idle_entered = 1;
+	hrtimer_cancel(&core->slack_timer);
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+}
+
+static void start_slack_timer(struct dcvs_core *core, int slack_us)
+{
+	unsigned long flags1, flags2;
+	int ret;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags2);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags1);
+
+	/* error only for CPU cores; GPUs may exit idle consecutively */
+	if (core->idle_entered == 0 && !(core->dcvs_core_id >= GPU_OFFSET))
+		__err("%s trying to reexit idle", core->core_name);
+	core->idle_entered = 0;
+	/*
+	 * only start the timer if a slack period was given and the
+	 * governor is not stopped
+	 */
+	if (slack_us != 0
+		&& !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) {
+		ret = hrtimer_start(&core->slack_timer,
+				ktime_set(0, slack_us * 1000),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret) {
+			pr_err("%s Failed to start timer ret = %d\n",
+					core->core_name, ret);
+		}
+	}
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags1);
+
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
+}
+
+static void restart_slack_timer(struct dcvs_core *core, int slack_us)
+{
+	unsigned long flags1, flags2;
+	int ret;
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags2);
+
+	hrtimer_cancel(&core->slack_timer);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags1);
+
+	/*
+	 * only start the timer if idle is not entered
+	 * and governor is not stopped
+	 */
+	if (slack_us != 0 && (core->idle_entered != 1)
+		&& !(core->pending_freq < NO_OUTSTANDING_FREQ_CHANGE)) {
+		ret = hrtimer_start(&core->slack_timer,
+				ktime_set(0, slack_us * 1000),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret) {
+			pr_err("%s Failed to start timer ret = %d\n",
+					core->core_name, ret);
+		}
+	}
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags1);
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags2);
+}
+
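+/*
+ * Apply a GPU frequency floor derived from the platform sync rules: the
+ * floor tracks the last seen cpu0 frequency, and is raised to at least
+ * gpu_max_nom_khz while more than one CPU is online. Calling this with
+ * cpu_freq == 0 re-evaluates the floor without updating the cpu0 frequency.
+ */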
+void msm_dcvs_apply_gpu_floor(unsigned long cpu_freq)
+{
+	static unsigned long curr_cpu0_freq;
+	unsigned long gpu_floor_freq = 0;
+	struct dcvs_core *gpu;
+	int i;
+
+	if (!dcvs_pdata)
+		return;
+
+	mutex_lock(&gpu_floor_mutex);
+
+	if (cpu_freq)
+		curr_cpu0_freq = cpu_freq;
+
+	for (i = 0; i < dcvs_pdata->num_sync_rules; i++)
+		if (curr_cpu0_freq > dcvs_pdata->sync_rules[i].cpu_khz) {
+			gpu_floor_freq =
+				dcvs_pdata->sync_rules[i].gpu_floor_khz;
+			break;
+		}
+
+	if (num_online_cpus() > 1)
+		gpu_floor_freq = max(gpu_floor_freq,
+				     dcvs_pdata->gpu_max_nom_khz);
+
+	if (!gpu_floor_freq) {
+		mutex_unlock(&gpu_floor_mutex);
+		return;
+	}
+
+	for (i = GPU_OFFSET; i < CORES_MAX; i++) {
+		gpu = &core_list[i];
+		if (gpu->dcvs_core_id == -1)
+			continue;
+
+		if (gpu->pending_freq != STOP_FREQ_CHANGE &&
+		    gpu->set_floor_frequency) {
+			gpu->set_floor_frequency(gpu->type_core_num,
+						 gpu_floor_freq);
+			/* TZ will know about a freq change (if any)
+			 * at next idle exit. */
+			gpu->actual_freq =
+				gpu->get_frequency(gpu->type_core_num);
+		}
+	}
+
+	mutex_unlock(&gpu_floor_mutex);
+}
+
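+/*
+ * Power-collapse gating: using the algo params that correspond to the
+ * current online-cpu count, disable the high-latency low power modes when
+ * the core runs at or above disable_pc_threshold, and re-enable them below
+ * it. Both callers hold param_update_mutex.
+ */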
+static void check_power_collapse_modes(struct dcvs_core *core)
+{
+	struct msm_dcvs_algo_param *params;
+
+	params = &core_list[CPU_OFFSET + num_online_cpus() - 1].algo_param;
+
+	if (core->actual_freq >= params->disable_pc_threshold)
+		core->idle_enable(core->type_core_num,
+				  MSM_DCVS_DISABLE_HIGH_LATENCY_MODES);
+	else
+		core->idle_enable(core->type_core_num,
+				  MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+}
 
 static int __msm_dcvs_change_freq(struct dcvs_core *core)
 {
 	int ret = 0;
 	unsigned long flags = 0;
-	unsigned int requested_freq = 0;
-	unsigned int prev_freq = 0;
-	int64_t time_start = 0;
-	int64_t time_end = 0;
+	int requested_freq = 0;
+	ktime_t time_start;
 	uint32_t slack_us = 0;
 	uint32_t ret1 = 0;
 
-	if (!core->freq_driver || !core->freq_driver->set_frequency) {
-		
-		return -ENODEV;
-	}
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+	if (core->pending_freq == STOP_FREQ_CHANGE)
+		goto out;
 repeat:
-	spin_lock_irqsave(&core->cpu_lock, flags);
-	if (unlikely(!core->freq_pending)) {
-		spin_unlock_irqrestore(&core->cpu_lock, flags);
-		return ret;
-	}
-	requested_freq = core->new_freq[core->freq_pending - 1];
-	if (unlikely(core->freq_pending > 1) &&
-		(msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)) {
-		int i;
-		for (i = 0; i < core->freq_pending - 1; i++) {
-			__info("Core %s missing freq %u\n",
-				core->core_name, core->new_freq[i]);
-		}
-	}
+	BUG_ON(!core->pending_freq);
+
+	requested_freq = core->pending_freq;
 	time_start = core->time_start;
-	core->time_start = 0;
-	core->freq_pending = 0;
-	hrtimer_cancel(&core->timer);
-	core->timer_disabled = 1;
-	spin_unlock_irqrestore(&core->cpu_lock, flags);
+	core->time_start = ns_to_ktime(0);
+
+	core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
 
 	if (requested_freq == core->actual_freq)
-		return ret;
+		goto out;
 
-	ret = core->freq_driver->set_frequency(core->freq_driver,
-				requested_freq);
-	if (ret <= 0) {
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
+	if (core->type == MSM_DCVS_CORE_TYPE_CPU &&
+	    core->type_core_num == 0)
+		msm_dcvs_apply_gpu_floor(requested_freq);
+
+	/**
+	 * Call the frequency sink driver to change the frequency.
+	 * It returns the actual frequency set, in KHz; record the
+	 * time taken to change it.
+	 */
+	ret = core->set_frequency(core->type_core_num, requested_freq);
+	if (ret <= 0)
 		__err("Core %s failed to set freq %u\n",
 				core->core_name, requested_freq);
-		
-	} else {
-		prev_freq = core->actual_freq;
+		/* continue to call TZ to get updated slack timer */
+	else
 		core->actual_freq = ret;
+
+	core->freq_change_us = (uint32_t)ktime_to_us(
+					ktime_sub(ktime_get(), time_start));
+
+	if (core->type == MSM_DCVS_CORE_TYPE_CPU &&
+	    core->type_core_num == 0) {
+		mutex_lock(&param_update_mutex);
+		check_power_collapse_modes(core);
+		mutex_unlock(&param_update_mutex);
 	}
 
-	time_end = ktime_to_ns(ktime_get());
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
-		__info("Core %s Time end %llu Time start: %llu\n",
-			core->core_name, time_end, time_start);
-	time_end -= time_start;
-	do_div(time_end, NSEC_PER_USEC);
-	core->freq_change_us = (uint32_t)time_end;
-
-	if (core->actual_freq >
-			core->algo_param.disable_pc_threshold) {
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_DISABLE_HIGH_LATENCY_MODES);
-		if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-			__info("Disabling LPM for %s\n", core->core_name);
-	} else if (core->actual_freq <=
-			core->algo_param.disable_pc_threshold) {
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
-		if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-			__info("Enabling LPM for %s\n", core->core_name);
+	/**
+	 * Update the algorithm with the new frequency and the time taken
+	 * to change to it; in return we get the new slack timer period.
+	 */
+	ret = msm_dcvs_scm_event(core->dcvs_core_id,
+			MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
+			core->actual_freq, core->freq_change_us,
+			&slack_us, &ret1);
+	if (ret) {
+		__err("Error sending core (%s) dcvs_core_id = %d freq change (%u) reqfreq = %d slack_us=%d ret = %d\n",
+				core->core_name, core->dcvs_core_id,
+				core->actual_freq, requested_freq,
+				slack_us, ret);
 	}
 
-	ret = msm_dcvs_scm_event(core->handle, MSM_DCVS_SCM_CLOCK_FREQ_UPDATE,
-		core->actual_freq, (uint32_t)time_end, &slack_us, &ret1);
-	if (!ret) {
-		
-		if (slack_us) {
-			core->timer_disabled = 0;
-			ret = hrtimer_start(&core->timer,
-				ktime_set(0, slack_us * 1000),
-				HRTIMER_MODE_REL_PINNED);
-			if (ret)
-				__err("Failed to register timer for core %s\n",
-						core->core_name);
-		}
-	} else {
-		__err("Error sending core (%s) freq change (%u)\n",
-				core->core_name, core->actual_freq);
-	}
+	/* TODO confirm that we get a valid freq from SM even when the above
+	 * FREQ_UPDATE fails
+	 */
+	restart_slack_timer(core, slack_us);
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
 
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
-		__info("Freq %u requested for core %s (actual %u prev %u) "
-			"change time %u us slack time %u us\n",
-			requested_freq, core->core_name,
-			core->actual_freq, prev_freq,
-			core->freq_change_us, slack_us);
-
-	if (core->freq_pending)
+	/**
+	 * By the time we are done with freq changes, we could be asked to
+	 * change again. Check before exiting.
+	 */
+	if (core->pending_freq != NO_OUTSTANDING_FREQ_CHANGE
+		&& core->pending_freq != STOP_FREQ_CHANGE) {
 		goto repeat;
+	}
 
-	core->change_freq_activated = 0;
+out:   /* must be jumped to with pending_freq_lock held */
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
 	return ret;
 }
 
+static void msm_dcvs_report_temp_work(struct work_struct *work)
+{
+	struct dcvs_core *core = container_of(work,
+					struct dcvs_core,
+					temperature_work.work);
+	struct msm_dcvs_core_info *info = core->info;
+	struct tsens_device tsens_dev;
+	int ret;
+	unsigned long temp = 0;
+	int interval_ms;
+
+	if (!(core->flags & CORE_FLAG_TEMP_UPDATE))
+		return;
+
+	tsens_dev.sensor_num = core->sensor;
+	ret = tsens_get_temp(&tsens_dev, &temp);
+	if (!temp) {
+		tsens_dev.sensor_num = 0;
+		ret = tsens_get_temp(&tsens_dev, &temp);
+		if (!temp)
+			goto out;
+	}
+
+	if (temp == info->power_param.current_temp)
+		goto out;
+	info->power_param.current_temp = temp;
+
+	ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
+			&info->power_param,
+			&info->freq_tbl[0], &core->coeffs);
+out:
+	if (info->thermal_poll_ms == 0)
+		interval_ms = 60000;
+	else if (info->thermal_poll_ms < 1000)
+		interval_ms = 1000;
+	else
+		interval_ms = info->thermal_poll_ms;
+
+	schedule_delayed_work(&core->temperature_work,
+			msecs_to_jiffies(interval_ms));
+}
+
 static int msm_dcvs_do_freq(void *data)
 {
 	struct dcvs_core *core = (struct dcvs_core *)data;
-	static struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1};
-
-	sched_setscheduler(current, SCHED_FIFO, &param);
-	set_current_state(TASK_UNINTERRUPTIBLE);
 
 	while (!kthread_should_stop()) {
-		mutex_lock(&core->lock);
-		__msm_dcvs_change_freq(core);
-		mutex_unlock(&core->lock);
-
-		schedule();
+		wait_event(core->wait_q,
+			!(core->pending_freq == NO_OUTSTANDING_FREQ_CHANGE ||
+			core->pending_freq == STOP_FREQ_CHANGE) ||
+			kthread_should_stop());
 
 		if (kthread_should_stop())
 			break;
 
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		__msm_dcvs_change_freq(core);
 	}
 
-	__set_current_state(TASK_RUNNING);
-
 	return 0;
 }
 
+/* pending_freq_lock must be held */
+static void request_freq_change(struct dcvs_core *core, int new_freq)
+{
+	if (new_freq == NO_OUTSTANDING_FREQ_CHANGE) {
+		if (core->pending_freq != STOP_FREQ_CHANGE) {
+			__err("%s gov started with earlier pending freq %d\n",
+					core->core_name, core->pending_freq);
+		}
+		core->pending_freq = NO_OUTSTANDING_FREQ_CHANGE;
+		return;
+	}
+
+	if (new_freq == STOP_FREQ_CHANGE) {
+		core->pending_freq = STOP_FREQ_CHANGE;
+		return;
+	}
+
+	if (core->pending_freq < 0) {
+		/* a value less than 0 means that the governor has stopped
+		 * and no more freq changes should be requested
+		 */
+		return;
+	}
+
+	if (core->actual_freq != new_freq && core->pending_freq != new_freq) {
+		core->pending_freq = new_freq;
+		core->time_start = ktime_get();
+		wake_up(&core->wait_q);
+	}
+}
+
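+/*
+ * Send @event to TZ and, if it answers with a non-zero frequency, queue a
+ * frequency change request for the update thread to pick up.
+ */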
 static int msm_dcvs_update_freq(struct dcvs_core *core,
 		enum msm_dcvs_scm_event event, uint32_t param0,
-		uint32_t *ret1, int *freq_changed)
+		uint32_t *ret1)
 {
 	int ret = 0;
 	unsigned long flags = 0;
-	uint32_t new_freq = 0;
+	uint32_t new_freq = -EINVAL;
 
-	spin_lock_irqsave(&core->cpu_lock, flags);
-	ret = msm_dcvs_scm_event(core->handle, event, param0,
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+
+	ret = msm_dcvs_scm_event(core->dcvs_core_id, event, param0,
 				core->actual_freq, &new_freq, ret1);
 	if (ret) {
-		__err("Error (%d) sending SCM event %d for core %s\n",
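+		/* -13 (-EACCES) from TZ is not treated as an error */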
+		if (ret == -13)
+			ret = 0;
+		else
+			__err("Error (%d) sending SCM event %d for core %s\n",
 				ret, event, core->core_name);
-		goto freq_done;
+		goto out;
 	}
 
-	if ((core->actual_freq != new_freq) &&
-			(core->new_freq[core->freq_pending] != new_freq)) {
-		if (core->freq_pending >= MAX_PENDING - 1)
-			core->freq_pending = MAX_PENDING - 1;
-		core->new_freq[core->freq_pending++] = new_freq;
-		core->time_start = ktime_to_ns(ktime_get());
-
-		
-		if (!core->task)
-			__err("Uninitialized task for core %s\n",
-					core->core_name);
-		else {
-			if (freq_changed)
-				*freq_changed = 1;
-			core->change_freq_activated = 1;
-			wake_up_process(core->task);
-		}
-	} else {
-		if (freq_changed)
-			*freq_changed = 0;
+	if (new_freq == 0) {
+		/*
+		 * sometimes TZ gives us a 0 freq back,
+		 * do not queue up a request
+		 */
+		goto out;
 	}
-freq_done:
-	spin_unlock_irqrestore(&core->cpu_lock, flags);
+
+	request_freq_change(core, new_freq);
+
+out:
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 
 	return ret;
 }
@@ -276,15 +545,17 @@
 static enum hrtimer_restart msm_dcvs_core_slack_timer(struct hrtimer *timer)
 {
 	int ret = 0;
-	struct dcvs_core *core = container_of(timer, struct dcvs_core, timer);
+	struct dcvs_core *core = container_of(timer,
+					struct dcvs_core, slack_timer);
 	uint32_t ret1;
-	uint32_t ret2;
 
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_FREQ_CHANGE)
-		__info("Slack timer fired for core %s\n", core->core_name);
-
+	trace_printk("dcvs: Slack timer fired for core=%s\n", core->core_name);
+	/**
+	 * Timer expired; notify TZ. The third argument is a don't-care.
+	 */
 	ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_QOS_TIMER_EXPIRED, 0,
-				   &ret1, &ret2);
+				   &ret1);
 	if (ret)
 		__err("Timer expired for core %s but failed to notify.\n",
 				core->core_name);
@@ -292,6 +563,39 @@
 	return HRTIMER_NORESTART;
 }
 
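+/*
+ * Push the algo params selected by the current online-cpu count to TZ for
+ * every possible cpu, and refresh the power-collapse gating on cpu0. SCM
+ * calls are only issued when the params changed since the last push.
+ */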
+int msm_dcvs_update_algo_params(void)
+{
+	static struct msm_dcvs_algo_param curr_params;
+	struct msm_dcvs_algo_param *new_params;
+	int cpu, ret = 0;
+
+	mutex_lock(&param_update_mutex);
+	new_params = &core_list[CPU_OFFSET + num_online_cpus() - 1].algo_param;
+
+	if (memcmp(&curr_params, new_params,
+		   sizeof(struct msm_dcvs_algo_param))) {
+		for_each_possible_cpu(cpu) {
+			struct dcvs_core *core = &core_list[CPU_OFFSET + cpu];
+			ret = msm_dcvs_scm_set_algo_params(CPU_OFFSET + cpu,
+							   new_params);
+			if (ret) {
+				pr_err("scm set algo params failed on cpu %d, ret %d\n",
+				       cpu, ret);
+				mutex_unlock(&param_update_mutex);
+				return ret;
+			}
+			if (cpu == 0)
+				check_power_collapse_modes(core);
+		}
+		memcpy(&curr_params, new_params,
+		       sizeof(struct msm_dcvs_algo_param));
+	}
+
+	mutex_unlock(&param_update_mutex);
+	return ret;
+}
+
+/* Helper macros and functions for a core's sysfs nodes */
 #define CORE_FROM_ATTRIBS(attr, name) \
 	container_of(container_of(attr, struct core_attribs, name), \
 		struct dcvs_core, attrib);
@@ -304,6 +608,28 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", v); \
 }
 
+#define DCVS_PARAM_STORE(_name) \
+static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
+		struct kobj_attribute *attr, char *buf) \
+{ \
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+	return snprintf(buf, PAGE_SIZE, "%d\n", core->info->_name); \
+} \
+static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	uint32_t val = 0; \
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) { \
+		__err("Invalid input %s for %s\n", buf, __stringify(_name));\
+	} else { \
+		core->info->_name = val; \
+	} \
+	return count; \
+}
+
 #define DCVS_ALGO_PARAM(_name) \
 static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
 		struct kobj_attribute *attr, char *buf) \
@@ -317,22 +643,48 @@
 	int ret = 0; \
 	uint32_t val = 0; \
 	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
-	mutex_lock(&core->lock); \
 	ret = kstrtouint(buf, 10, &val); \
 	if (ret) { \
 		__err("Invalid input %s for %s\n", buf, __stringify(_name));\
 	} else { \
 		uint32_t old_val = core->algo_param._name; \
 		core->algo_param._name = val; \
-		ret = msm_dcvs_scm_set_algo_params(core->handle, \
-				&core->algo_param); \
+		ret = msm_dcvs_update_algo_params(); \
 		if (ret) { \
 			core->algo_param._name = old_val; \
-			__err("Error(%d) in setting %d for algo param %s\n",\
+		} \
+	} \
+	return count; \
+}
+
+#define DCVS_ENERGY_PARAM(_name) \
+static ssize_t msm_dcvs_attr_##_name##_show(struct kobject *kobj,\
+		struct kobj_attribute *attr, char *buf) \
+{ \
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+	return snprintf(buf, PAGE_SIZE, "%d\n", core->coeffs._name); \
+} \
+static ssize_t msm_dcvs_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	int32_t val = 0; \
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, _name); \
+	ret = kstrtoint(buf, 10, &val); \
+	if (ret) { \
+		__err("Invalid input %s for %s\n", buf, __stringify(_name));\
+	} else { \
+		int32_t old_val = core->coeffs._name; \
+		core->coeffs._name = val; \
+		ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id, \
+			&core->info->power_param, &core->info->freq_tbl[0], \
+				&core->coeffs); \
+		if (ret) { \
+			core->coeffs._name = old_val; \
+			__err("Error(%d) in setting %d for coeffs param %s\n",\
 					ret, val, __stringify(_name)); \
 		} \
 	} \
-	mutex_unlock(&core->lock); \
 	return count; \
 }
 
@@ -350,27 +702,185 @@
 	core->attrib._name.store = msm_dcvs_attr_##_name##_store; \
 	core->attrib.attrib_group.attrs[i] = &core->attrib._name.attr;
 
-DCVS_PARAM_SHOW(idle_enabled, (core->idle_driver != NULL))
-DCVS_PARAM_SHOW(freq_change_enabled, (core->freq_driver != NULL))
-DCVS_PARAM_SHOW(actual_freq, (core->actual_freq))
+/**
+ * Attribute show/store definitions.
+ * Used below when wiring up each attribute's show and store handlers.
+ */
 DCVS_PARAM_SHOW(freq_change_us, (core->freq_change_us))
-DCVS_PARAM_SHOW(max_time_us, (core->max_time_us))
 
-DCVS_ALGO_PARAM(slack_time_us)
-DCVS_ALGO_PARAM(scale_slack_time)
-DCVS_ALGO_PARAM(scale_slack_time_pct)
 DCVS_ALGO_PARAM(disable_pc_threshold)
-DCVS_ALGO_PARAM(em_window_size)
+DCVS_ALGO_PARAM(em_win_size_min_us)
+DCVS_ALGO_PARAM(em_win_size_max_us)
 DCVS_ALGO_PARAM(em_max_util_pct)
-DCVS_ALGO_PARAM(ss_window_size)
+DCVS_ALGO_PARAM(group_id)
+DCVS_ALGO_PARAM(max_freq_chg_time_us)
+DCVS_ALGO_PARAM(slack_mode_dynamic)
+DCVS_ALGO_PARAM(slack_time_min_us)
+DCVS_ALGO_PARAM(slack_time_max_us)
+DCVS_ALGO_PARAM(slack_weight_thresh_pct)
+DCVS_ALGO_PARAM(ss_no_corr_below_freq)
+DCVS_ALGO_PARAM(ss_win_size_min_us)
+DCVS_ALGO_PARAM(ss_win_size_max_us)
 DCVS_ALGO_PARAM(ss_util_pct)
-DCVS_ALGO_PARAM(ss_iobusy_conv)
+
+DCVS_ENERGY_PARAM(active_coeff_a)
+DCVS_ENERGY_PARAM(active_coeff_b)
+DCVS_ENERGY_PARAM(active_coeff_c)
+DCVS_ENERGY_PARAM(leakage_coeff_a)
+DCVS_ENERGY_PARAM(leakage_coeff_b)
+DCVS_ENERGY_PARAM(leakage_coeff_c)
+DCVS_ENERGY_PARAM(leakage_coeff_d)
+
+DCVS_PARAM_STORE(thermal_poll_ms)
+
+static ssize_t msm_dcvs_attr_offset_tbl_show(struct kobject *kobj,
+					     struct kobj_attribute *attr,
+					     char *buf)
+{
+	struct msm_dcvs_freq_entry *freq_tbl;
+	char *buf_idx = buf;
+	int i, len;
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, offset_tbl);
+
+	freq_tbl = core->info->freq_tbl;
+	*buf_idx = '\0';
+
+	/* limit the number of frequencies we will print into
+	 * the PAGE_SIZE sysfs show buffer. */
+	if (core->info->power_param.num_freq > 64)
+		return 0;
+
+	for (i = 0; i < core->info->power_param.num_freq; i++) {
+		len = snprintf(buf_idx, 30, "%7d %7d %7d\n",
+			       freq_tbl[i].freq,
+			       freq_tbl[i].active_energy_offset,
+			       freq_tbl[i].leakage_energy_offset);
+		/* buf_idx always points at terminating null */
+		buf_idx += len;
+	}
+	return buf_idx - buf;
+}
+
+static ssize_t msm_dcvs_attr_offset_tbl_store(struct kobject *kobj,
+					      struct kobj_attribute *attr,
+					      const char *buf,
+					      size_t count)
+{
+	struct msm_dcvs_freq_entry *freq_tbl;
+	uint32_t freq, active_energy_offset, leakage_energy_offset;
+	int i, ret;
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, offset_tbl);
+
+	freq_tbl = core->info->freq_tbl;
+
+	ret = sscanf(buf, "%u %u %u",
+		     &freq, &active_energy_offset, &leakage_energy_offset);
+	if (ret != 3) {
+		__err("Invalid input %s for offset_tbl\n", buf);
+		return count;
+	}
+
+	for (i = 0; i < core->info->power_param.num_freq; i++)
+		if (freq_tbl[i].freq == freq) {
+			freq_tbl[i].active_energy_offset =
+				active_energy_offset;
+			freq_tbl[i].leakage_energy_offset =
+				leakage_energy_offset;
+			break;
+		}
+
+	if (i >= core->info->power_param.num_freq) {
+		__err("Invalid frequency for offset_tbl: %d\n", freq);
+		return count;
+	}
+
+	ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
+					    &core->info->power_param,
+					    &core->info->freq_tbl[0],
+					    &core->coeffs);
+	if (ret)
+		__err("Error %d in updating active/leakage energy\n", ret);
+
+	return count;
+}
+
+static ssize_t msm_dcvs_attr_freq_tbl_show(struct kobject *kobj,
+					   struct kobj_attribute *attr,
+					   char *buf)
+{
+	struct msm_dcvs_freq_entry *freq_tbl;
+	char *buf_idx = buf;
+	int i, len;
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, freq_tbl);
+
+	freq_tbl = core->info->freq_tbl;
+	*buf_idx = '\0';
+
+	/* limit the number of frequencies we will print into
+	 * the PAGE_SIZE sysfs show buffer. */
+	if (core->info->power_param.num_freq > 64)
+		return 0;
+
+	for (i = 0; i < core->info->power_param.num_freq; i++) {
+		if (freq_tbl[i].is_trans_level) {
+			len = snprintf(buf_idx, 10, "%7d ", freq_tbl[i].freq);
+			/* buf_idx always points at terminating null */
+			buf_idx += len;
+		}
+	}
+	/* overwrite final trailing space with newline */
+	if (buf_idx > buf)
+		*(buf_idx - 1) = '\n';
+
+	return buf_idx - buf;
+}
+
+static ssize_t msm_dcvs_attr_freq_tbl_store(struct kobject *kobj,
+					    struct kobj_attribute *attr,
+					    const char *buf,
+					    size_t count)
+{
+	struct msm_dcvs_freq_entry *freq_tbl;
+	uint32_t freq;
+	int i, ret;
+	struct dcvs_core *core = CORE_FROM_ATTRIBS(attr, freq_tbl);
+
+	freq_tbl = core->info->freq_tbl;
+
+	ret = kstrtouint(buf, 10, &freq);
+	if (ret) {
+		__err("Invalid input %s for freq_tbl\n", buf);
+		return count;
+	}
+
+	for (i = 0; i < core->info->power_param.num_freq; i++)
+		if (freq_tbl[i].freq == freq) {
+			freq_tbl[i].is_trans_level ^= 1;
+			break;
+		}
+
+	if (i >= core->info->power_param.num_freq) {
+		__err("Invalid frequency for freq_tbl: %d\n", freq);
+		return count;
+	}
+
+	ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
+					    &core->info->power_param,
+					    &core->info->freq_tbl[0],
+					    &core->coeffs);
+	if (ret) {
+		freq_tbl[i].is_trans_level ^= 1;
+		__err("Error %d in toggling freq %d (orig enable val %d)\n",
+		      ret, freq_tbl[i].freq, freq_tbl[i].is_trans_level);
+	}
+	return count;
+}
 
 static int msm_dcvs_setup_core_sysfs(struct dcvs_core *core)
 {
 	int ret = 0;
 	struct kobject *core_kobj = NULL;
-	const int attr_count = 15;
+	const int attr_count = 26;
 
 	BUG_ON(!cores_kobj);
 
@@ -382,23 +892,36 @@
 		goto done;
 	}
 
-	DCVS_RO_ATTRIB(0, idle_enabled);
-	DCVS_RO_ATTRIB(1, freq_change_enabled);
-	DCVS_RO_ATTRIB(2, actual_freq);
-	DCVS_RO_ATTRIB(3, freq_change_us);
-	DCVS_RO_ATTRIB(4, max_time_us);
+	DCVS_RO_ATTRIB(0, freq_change_us);
 
-	DCVS_RW_ATTRIB(5, slack_time_us);
-	DCVS_RW_ATTRIB(6, scale_slack_time);
-	DCVS_RW_ATTRIB(7, scale_slack_time_pct);
-	DCVS_RW_ATTRIB(8, disable_pc_threshold);
-	DCVS_RW_ATTRIB(9, em_window_size);
-	DCVS_RW_ATTRIB(10, em_max_util_pct);
-	DCVS_RW_ATTRIB(11, ss_window_size);
-	DCVS_RW_ATTRIB(12, ss_util_pct);
-	DCVS_RW_ATTRIB(13, ss_iobusy_conv);
+	DCVS_RW_ATTRIB(1, disable_pc_threshold);
+	DCVS_RW_ATTRIB(2, em_win_size_min_us);
+	DCVS_RW_ATTRIB(3, em_win_size_max_us);
+	DCVS_RW_ATTRIB(4, em_max_util_pct);
+	DCVS_RW_ATTRIB(5, group_id);
+	DCVS_RW_ATTRIB(6, max_freq_chg_time_us);
+	DCVS_RW_ATTRIB(7, slack_mode_dynamic);
+	DCVS_RW_ATTRIB(8, slack_weight_thresh_pct);
+	DCVS_RW_ATTRIB(9, slack_time_min_us);
+	DCVS_RW_ATTRIB(10, slack_time_max_us);
+	DCVS_RW_ATTRIB(11, ss_no_corr_below_freq);
+	DCVS_RW_ATTRIB(12, ss_win_size_min_us);
+	DCVS_RW_ATTRIB(13, ss_win_size_max_us);
+	DCVS_RW_ATTRIB(14, ss_util_pct);
 
-	core->attrib.attrib_group.attrs[14] = NULL;
+	DCVS_RW_ATTRIB(15, active_coeff_a);
+	DCVS_RW_ATTRIB(16, active_coeff_b);
+	DCVS_RW_ATTRIB(17, active_coeff_c);
+	DCVS_RW_ATTRIB(18, leakage_coeff_a);
+	DCVS_RW_ATTRIB(19, leakage_coeff_b);
+	DCVS_RW_ATTRIB(20, leakage_coeff_c);
+	DCVS_RW_ATTRIB(21, leakage_coeff_d);
+	DCVS_RW_ATTRIB(22, thermal_poll_ms);
+
+	DCVS_RW_ATTRIB(23, freq_tbl);
+	DCVS_RW_ATTRIB(24, offset_tbl);
+
+	core->attrib.attrib_group.attrs[25] = NULL;
 
 	core_kobj = kobject_create_and_add(core->core_name, cores_kobj);
 	if (!core_kobj) {
@@ -409,8 +932,6 @@
 	ret = sysfs_create_group(core_kobj, &core->attrib.attrib_group);
 	if (ret)
 		__err("Cannot create core %s attr group\n", core->core_name);
-	else if (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER)
-		__info("Setting up attributes for core %s\n", core->core_name);
 
 done:
 	if (ret) {
@@ -421,266 +942,329 @@
 	return ret;
 }
 
-static struct dcvs_core *msm_dcvs_get_core(const char *name, int add_to_list)
+static int get_core_offset(enum msm_dcvs_core_type type, int num)
+{
+	int offset = -EINVAL;
+
+	switch (type) {
+	case MSM_DCVS_CORE_TYPE_CPU:
+		offset = CPU_OFFSET + num;
+		BUG_ON(offset >= GPU_OFFSET);
+		break;
+	case MSM_DCVS_CORE_TYPE_GPU:
+		offset = GPU_OFFSET + num;
+		BUG_ON(offset >= CORES_MAX);
+		break;
+	default:
+		BUG();
+	}
+
+	return offset;
+}
+
+/* Return the core and initialize its non-platform-data fields */
+static struct dcvs_core *msm_dcvs_add_core(enum msm_dcvs_core_type type,
+								int num)
 {
 	struct dcvs_core *core = NULL;
 	int i;
-	int empty = -1;
+	char name[CORE_NAME_MAX];
 
-	if (!name[0] ||
-		(strnlen(name, CORE_NAME_MAX - 1) == CORE_NAME_MAX - 1))
-		return core;
-
-	mutex_lock(&core_list_lock);
-	for (i = 0; i < CORES_MAX; i++) {
-		core = &core_list[i];
-		if ((empty < 0) && !core->core_name[0]) {
-			empty = i;
-			continue;
-		}
-		if (!strncmp(name, core->core_name, CORE_NAME_MAX))
-			break;
-	}
-
-	
-	if ((i == CORES_MAX) && (empty < 0)) {
-		mutex_unlock(&core_list_lock);
+	i = get_core_offset(type, num);
+	if (i < 0)
 		return NULL;
-	}
 
-	if (i == CORES_MAX && add_to_list) {
-		core = &core_list[empty];
-		strlcpy(core->core_name, name, CORE_NAME_MAX);
-		mutex_init(&core->lock);
-		spin_lock_init(&core->cpu_lock);
-		core->handle = empty + CORE_HANDLE_OFFSET;
-		hrtimer_init(&core->timer,
-				CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-		core->timer.function = msm_dcvs_core_slack_timer;
-	}
-	mutex_unlock(&core_list_lock);
+	if (type == MSM_DCVS_CORE_TYPE_CPU)
+		snprintf(name, CORE_NAME_MAX, "cpu%d", num);
+	else
+		snprintf(name, CORE_NAME_MAX, "gpu%d", num);
 
+	core = &core_list[i];
+	core->dcvs_core_id = i;
+	strlcpy(core->core_name, name, CORE_NAME_MAX);
+	spin_lock_init(&core->pending_freq_lock);
+	spin_lock_init(&core->idle_state_change_lock);
+	hrtimer_init(&core->slack_timer,
+			CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+	core->slack_timer.function = msm_dcvs_core_slack_timer;
 	return core;
 }
 
-int msm_dcvs_register_core(const char *core_name, uint32_t group_id,
-		struct msm_dcvs_core_info *info)
+/* Return the core at @offset; it must already have been registered */
+static struct dcvs_core *msm_dcvs_get_core(int offset)
+{
+	/* BUG if the core id was never set */
+	BUG_ON(core_list[offset].dcvs_core_id == -1);
+	return &core_list[offset];
+}
+
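+/*
+ * Record one CPU freq/voltage pair in the table handed to TZ. The table
+ * must be populated before the first CPU core registers (enforced by the
+ * BUG_ON(num_cpu_freqs == 0) in msm_dcvs_register_core()).
+ */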
+void msm_dcvs_register_cpu_freq(uint32_t freq, uint32_t voltage)
+{
+	BUG_ON(freq == 0 || voltage == 0 ||
+	       num_cpu_freqs == DCVS_MAX_NUM_FREQS);
+
+	cpu_freq_tbl[num_cpu_freqs].freq = freq;
+	cpu_freq_tbl[num_cpu_freqs].voltage = voltage;
+
+	num_cpu_freqs++;
+}
+
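+/*
+ * Register a core with DCVS: wire up the frequency/idle callbacks, push
+ * the core, algo and power params to TZ, create the sysfs nodes and start
+ * the per-core frequency-change thread. Returns the dcvs_core_id (also if
+ * the core was already registered), or -EINVAL on failure.
+ */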
+int msm_dcvs_register_core(
+	enum msm_dcvs_core_type type,
+	int type_core_num,
+	struct msm_dcvs_core_info *info,
+	int (*set_frequency)(int type_core_num, unsigned int freq),
+	unsigned int (*get_frequency)(int type_core_num),
+	int (*idle_enable)(int type_core_num,
+					enum msm_core_control_event event),
+	int (*set_floor_frequency)(int type_core_num, unsigned int freq),
+	int sensor)
 {
 	int ret = -EINVAL;
+	int offset;
 	struct dcvs_core *core = NULL;
+	uint32_t ret1;
+	uint32_t ret2;
 
-	if (!core_name || !core_name[0])
+	offset = get_core_offset(type, type_core_num);
+	if (offset < 0)
 		return ret;
+	if (core_list[offset].dcvs_core_id != -1)
+		return core_list[offset].dcvs_core_id;
 
-	core = msm_dcvs_get_core(core_name, true);
+	core = msm_dcvs_add_core(type, type_core_num);
 	if (!core)
 		return ret;
 
-	mutex_lock(&core->lock);
-	if (group_id) {
-		ret = msm_dcvs_scm_create_group(group_id);
-		if (ret == -ENOMEM)
-			goto bail;
-	}
-	core->group_id = group_id;
+	core->type = type;
+	core->type_core_num = type_core_num;
+	core->set_frequency = set_frequency;
+	core->get_frequency = get_frequency;
+	core->idle_enable = idle_enable;
+	core->set_floor_frequency = set_floor_frequency;
 
-	core->max_time_us = info->core_param.max_time_us;
+	core->info = info;
+	if (type == MSM_DCVS_CORE_TYPE_CPU) {
+		BUG_ON(num_cpu_freqs == 0);
+		info->freq_tbl = cpu_freq_tbl;
+		info->power_param.num_freq = num_cpu_freqs;
+	}
+
 	memcpy(&core->algo_param, &info->algo_param,
 			sizeof(struct msm_dcvs_algo_param));
 
-	ret = msm_dcvs_scm_register_core(core->handle, group_id,
-			&info->core_param, info->freq_tbl);
-	if (ret)
-		goto bail;
+	memcpy(&core->coeffs, &info->energy_coeffs,
+			sizeof(struct msm_dcvs_energy_curve_coeffs));
 
-	ret = msm_dcvs_scm_set_algo_params(core->handle, &info->algo_param);
+	/*
+	 * TZ expects cpu0 to be bit 0 of the mask, while dcvs_core_id
+	 * starts from 1: dcvs_core_id 0 marks a request not associated
+	 * with any core (e.g. mpdecision's algo params).
+	 */
+	info->core_param.core_bitmask_id
+				= 1 << (core->dcvs_core_id - CPU_OFFSET);
+	core->sensor = sensor;
+
+	ret = msm_dcvs_scm_register_core(core->dcvs_core_id, &info->core_param);
+	if (ret) {
+		__err("%s: scm register core fail handle = %d ret = %d\n",
+					__func__, core->dcvs_core_id, ret);
+		goto bail;
+	}
+
+	ret = msm_dcvs_scm_set_algo_params(core->dcvs_core_id,
+							&info->algo_param);
+	if (ret) {
+		__err("%s: scm algo params failed ret = %d\n", __func__, ret);
+		goto bail;
+	}
+
+	ret = msm_dcvs_scm_set_power_params(core->dcvs_core_id,
+				&info->power_param,
+				&info->freq_tbl[0], &core->coeffs);
+	if (ret) {
+		__err("%s: scm power params failed ret = %d\n", __func__, ret);
+		goto bail;
+	}
+
+	ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE,
+				core->actual_freq, 0, &ret1, &ret2);
 	if (ret)
 		goto bail;
 
 	ret = msm_dcvs_setup_core_sysfs(core);
 	if (ret) {
 		__err("Unable to setup core %s sysfs\n", core->core_name);
-		core_handles[core->handle - CORE_HANDLE_OFFSET] = NULL;
 		goto bail;
 	}
-
-bail:
-	mutex_unlock(&core->lock);
+	core->idle_entered = -1;
+	init_waitqueue_head(&core->wait_q);
+	core->task = kthread_run(msm_dcvs_do_freq, (void *)core,
+			"msm_dcvs/%d", core->dcvs_core_id);
+	ret = core->dcvs_core_id;
 	return ret;
+bail:
+	core->dcvs_core_id = -1;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(msm_dcvs_register_core);
 
-int msm_dcvs_freq_sink_register(struct msm_dcvs_freq *drv)
+void msm_dcvs_update_limits(int dcvs_core_id)
+{
+	struct dcvs_core *core;
+
+	if (dcvs_core_id < CPU_OFFSET || dcvs_core_id >= CORES_MAX) {
+		__err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
+				__func__, dcvs_core_id);
+		return;
+	}
+
+	core = msm_dcvs_get_core(dcvs_core_id);
+	core->actual_freq = core->get_frequency(core->type_core_num);
+}
+
+int msm_dcvs_freq_sink_start(int dcvs_core_id)
 {
 	int ret = -EINVAL;
 	struct dcvs_core *core = NULL;
 	uint32_t ret1;
-	uint32_t ret2;
+	unsigned long flags;
+	int new_freq;
+	int timer_interval_us;
 
-	if (!drv || !drv->core_name)
-		return ret;
+	if (dcvs_core_id < CPU_OFFSET || dcvs_core_id >= CORES_MAX) {
+		__err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
+				__func__, dcvs_core_id);
+		return -EINVAL;
+	}
 
-	core = msm_dcvs_get_core(drv->core_name, true);
+	core = msm_dcvs_get_core(dcvs_core_id);
 	if (!core)
 		return ret;
 
-	mutex_lock(&core->lock);
-	if (core->freq_driver && (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER))
-		__info("Frequency notifier for %s being replaced\n",
-				core->core_name);
-	core->freq_driver = drv;
-	core->task = kthread_create(msm_dcvs_do_freq, (void *)core,
-			"msm_dcvs/%d", core->handle);
-	if (IS_ERR(core->task)) {
-		mutex_unlock(&core->lock);
-		return -EFAULT;
+	core->actual_freq = core->get_frequency(core->type_core_num);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+	/* mark that we are ready to accept new frequencies */
+	request_freq_change(core, NO_OUTSTANDING_FREQ_CHANGE);
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
+	spin_lock_irqsave(&core->idle_state_change_lock, flags);
+	core->idle_entered = -1;
+	spin_unlock_irqrestore(&core->idle_state_change_lock, flags);
+
+	/* Notify TZ to start receiving idle info for the core */
+	ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_DCVS_ENABLE, 1, &ret1);
+
+	ret = msm_dcvs_scm_event(
+		core->dcvs_core_id, MSM_DCVS_SCM_CORE_ONLINE, core->actual_freq,
+		0, &new_freq, &timer_interval_us);
+	if (ret)
+		__err("Error (%d) DCVS sending online for %s\n",
+				ret, core->core_name);
+
+	if (new_freq != 0) {
+		spin_lock_irqsave(&core->pending_freq_lock, flags);
+		request_freq_change(core, new_freq);
+		spin_unlock_irqrestore(&core->pending_freq_lock, flags);
 	}
+	force_start_slack_timer(core, timer_interval_us);
 
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-		__info("Enabling idle pulse for %s\n", core->core_name);
+	core->flags |= CORE_FLAG_TEMP_UPDATE;
+	INIT_DELAYED_WORK(&core->temperature_work, msm_dcvs_report_temp_work);
+	schedule_delayed_work(&core->temperature_work,
+			      msecs_to_jiffies(core->info->thermal_poll_ms));
 
-	if (core->idle_driver) {
-		core->actual_freq = core->freq_driver->get_frequency(drv);
-		
-		ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_ENABLE_CORE, 1,
-					   &ret1, &ret2);
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_ENABLE_IDLE_PULSE);
-	}
-
-	mutex_unlock(&core->lock);
-
-	return core->handle;
+	core->idle_enable(core->type_core_num, MSM_DCVS_ENABLE_IDLE_PULSE);
+	return 0;
 }
-EXPORT_SYMBOL(msm_dcvs_freq_sink_register);
+EXPORT_SYMBOL(msm_dcvs_freq_sink_start);
 
-int msm_dcvs_freq_sink_unregister(struct msm_dcvs_freq *drv)
+int msm_dcvs_freq_sink_stop(int dcvs_core_id)
 {
 	int ret = -EINVAL;
 	struct dcvs_core *core = NULL;
 	uint32_t ret1;
-	uint32_t ret2;
+	uint32_t freq;
+	unsigned long flags;
 
-	if (!drv || !drv->core_name)
-		return ret;
-
-	core = msm_dcvs_get_core(drv->core_name, false);
-	if (!core)
-		return ret;
-
-	mutex_lock(&core->lock);
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-		__info("Disabling idle pulse for %s\n", core->core_name);
-	if (core->idle_driver) {
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_DISABLE_IDLE_PULSE);
-		
-		ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_ENABLE_CORE, 0,
-					   &ret1, &ret2);
-		hrtimer_cancel(&core->timer);
-		core->idle_driver->enable(core->idle_driver,
-				MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
-		if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-			__info("Enabling LPM for %s\n", core->core_name);
+	if (dcvs_core_id < 0 || dcvs_core_id >= CORES_MAX) {
+		pr_err("%s invalid dcvs_core_id = %d returning -EINVAL\n",
+				__func__, dcvs_core_id);
+		return -EINVAL;
 	}
-	core->freq_pending = 0;
-	core->freq_driver = NULL;
-	mutex_unlock(&core->lock);
-	kthread_stop(core->task);
+
+	core = msm_dcvs_get_core(dcvs_core_id);
+	if (!core) {
+		__err("couldn't find core for coreid = %d\n", dcvs_core_id);
+		return ret;
+	}
+
+	core->flags &= ~CORE_FLAG_TEMP_UPDATE;
+	cancel_delayed_work(&core->temperature_work);
+
+	core->idle_enable(core->type_core_num, MSM_DCVS_DISABLE_IDLE_PULSE);
+	/* Notify TZ to stop receiving idle info for the core */
+	ret = msm_dcvs_scm_event(core->dcvs_core_id, MSM_DCVS_SCM_DCVS_ENABLE,
+				0, core->actual_freq, &freq, &ret1);
+	core->idle_enable(core->type_core_num,
+			MSM_DCVS_ENABLE_HIGH_LATENCY_MODES);
+
+	if (core->type == MSM_DCVS_CORE_TYPE_GPU)
+		mutex_lock(&gpu_floor_mutex);
+
+	spin_lock_irqsave(&core->pending_freq_lock, flags);
+	/* flush out all the pending freq changes */
+	request_freq_change(core, STOP_FREQ_CHANGE);
+	spin_unlock_irqrestore(&core->pending_freq_lock, flags);
+
+	if (core->type == MSM_DCVS_CORE_TYPE_GPU)
+		mutex_unlock(&gpu_floor_mutex);
+
+	force_stop_slack_timer(core);
 
 	return 0;
 }
-EXPORT_SYMBOL(msm_dcvs_freq_sink_unregister);
+EXPORT_SYMBOL(msm_dcvs_freq_sink_stop);
 
-int msm_dcvs_idle_source_register(struct msm_dcvs_idle *drv)
-{
-	int ret = -EINVAL;
-	struct dcvs_core *core = NULL;
-
-	if (!drv || !drv->core_name)
-		return ret;
-
-	core = msm_dcvs_get_core(drv->core_name, true);
-	if (!core)
-		return ret;
-
-	mutex_lock(&core->lock);
-	if (core->idle_driver && (msm_dcvs_debug & MSM_DCVS_DEBUG_NOTIFIER))
-		__info("Idle notifier for %s being replaced\n",
-				core->core_name);
-	core->idle_driver = drv;
-	mutex_unlock(&core->lock);
-
-	return core->handle;
-}
-EXPORT_SYMBOL(msm_dcvs_idle_source_register);
-
-int msm_dcvs_idle_source_unregister(struct msm_dcvs_idle *drv)
-{
-	int ret = -EINVAL;
-	struct dcvs_core *core = NULL;
-
-	if (!drv || !drv->core_name)
-		return ret;
-
-	core = msm_dcvs_get_core(drv->core_name, false);
-	if (!core)
-		return ret;
-
-	mutex_lock(&core->lock);
-	core->idle_driver = NULL;
-	mutex_unlock(&core->lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(msm_dcvs_idle_source_unregister);
-
-int msm_dcvs_idle(int handle, enum msm_core_idle_state state, uint32_t iowaited)
+int msm_dcvs_idle(int dcvs_core_id, enum msm_core_idle_state state,
+						uint32_t iowaited)
 {
 	int ret = 0;
 	struct dcvs_core *core = NULL;
 	uint32_t timer_interval_us = 0;
 	uint32_t r0, r1;
-	uint32_t freq_changed = 0;
 
-	if (handle >= CORE_HANDLE_OFFSET &&
-			(handle - CORE_HANDLE_OFFSET) < CORES_MAX)
-		core = &core_list[handle - CORE_HANDLE_OFFSET];
+	if (dcvs_core_id < CPU_OFFSET || dcvs_core_id >= CORES_MAX) {
+		pr_err("invalid dcvs_core_id = %d ret -EINVAL\n", dcvs_core_id);
+		return -EINVAL;
+	}
 
-	BUG_ON(!core);
-
-	if (msm_dcvs_debug & MSM_DCVS_DEBUG_IDLE_PULSE)
-		__info("Core %s idle state %d\n", core->core_name, state);
+	core = msm_dcvs_get_core(dcvs_core_id);
 
 	switch (state) {
 	case MSM_DCVS_IDLE_ENTER:
-		hrtimer_cancel(&core->timer);
-		ret = msm_dcvs_scm_event(core->handle,
+		stop_slack_timer(core);
+		ret = msm_dcvs_scm_event(core->dcvs_core_id,
 				MSM_DCVS_SCM_IDLE_ENTER, 0, 0, &r0, &r1);
-		if (ret)
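+		/* -13 (-EACCES) from TZ is ignored here as well */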
+		if (ret < 0 && ret != -13)
 			__err("Error (%d) sending idle enter for %s\n",
 					ret, core->core_name);
+		trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 1);
 		break;
 
 	case MSM_DCVS_IDLE_EXIT:
-		hrtimer_cancel(&core->timer);
 		ret = msm_dcvs_update_freq(core, MSM_DCVS_SCM_IDLE_EXIT,
-				iowaited, &timer_interval_us, &freq_changed);
+						iowaited, &timer_interval_us);
 		if (ret)
 			__err("Error (%d) sending idle exit for %s\n",
 					ret, core->core_name);
-		
-		if (freq_changed || core->change_freq_activated)
-			break;
-		if (timer_interval_us && !core->timer_disabled) {
-			ret = hrtimer_start(&core->timer,
-				ktime_set(0, timer_interval_us * 1000),
-				HRTIMER_MODE_REL_PINNED);
-
-			if (ret)
-				__err("Failed to register timer for core %s\n",
-				      core->core_name);
-		}
+		start_slack_timer(core, timer_interval_us);
+		trace_msm_dcvs_idle("idle_enter_exit", core->core_name, 0);
+		trace_msm_dcvs_iowait("iowait", core->core_name, iowaited);
+		trace_msm_dcvs_slack_time("slack_timer_dcvs", core->core_name,
+							timer_interval_us);
 		break;
 	}
 
@@ -715,13 +1299,6 @@
 		goto err;
 	}
 
-	if (!debugfs_create_u32("debug_mask", S_IRUGO | S_IWUSR,
-				debugfs_base, &msm_dcvs_debug)) {
-		__err("Cannot create debugfs entry %s\n", "debug_mask");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 err:
 	if (ret) {
 		kobject_del(cores_kobj);
@@ -733,19 +1310,47 @@
 }
 late_initcall(msm_dcvs_late_init);
 
+static int __devinit dcvs_probe(struct platform_device *pdev)
+{
+	if (pdev->dev.platform_data)
+		dcvs_pdata = pdev->dev.platform_data;
+
+	return 0;
+}
+
+static struct platform_driver dcvs_driver = {
+	.probe = dcvs_probe,
+	.driver = {
+		.name = "dcvs",
+		.owner = THIS_MODULE,
+	},
+};
+
 static int __init msm_dcvs_early_init(void)
 {
 	int ret = 0;
+	int i;
+
+	platform_driver_register(&dcvs_driver);
 
 	if (!msm_dcvs_enabled) {
 		__info("Not enabled (%d)\n", msm_dcvs_enabled);
 		return 0;
 	}
 
-	ret = msm_dcvs_scm_init(10 * 1024);
-	if (ret)
-		__err("Unable to initialize DCVS err=%d\n", ret);
 
+	/* Only need about 32kBytes for normal operation */
+	ret = msm_dcvs_scm_init(SZ_32K);
+	if (ret) {
+		__err("Unable to initialize DCVS err=%d\n", ret);
+		goto done;
+	}
+
+	for (i = 0; i < CORES_MAX; i++) {
+		core_list[i].dcvs_core_id = -1;
+		core_list[i].pending_freq = STOP_FREQ_CHANGE;
+	}
+done:
 	return ret;
 }
 postcore_initcall(msm_dcvs_early_init);
diff --git a/arch/arm/mach-msm/msm_dcvs_idle.c b/arch/arm/mach-msm/msm_dcvs_idle.c
deleted file mode 100644
index 442d53e..0000000
--- a/arch/arm/mach-msm/msm_dcvs_idle.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpu_pm.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <mach/msm_dcvs.h>
-
-struct cpu_idle_info {
-	int cpu;
-	int enabled;
-	int handle;
-	struct msm_dcvs_idle dcvs_notifier;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_idle_info, cpu_idle_info);
-static DEFINE_PER_CPU_SHARED_ALIGNED(u64, iowait_on_cpu);
-static char core_name[NR_CPUS][10];
-static struct pm_qos_request qos_req;
-static uint32_t latency;
-
-static int msm_dcvs_idle_notifier(struct msm_dcvs_idle *self,
-		enum msm_core_control_event event)
-{
-	struct cpu_idle_info *info = container_of(self,
-				struct cpu_idle_info, dcvs_notifier);
-
-	switch (event) {
-	case MSM_DCVS_ENABLE_IDLE_PULSE:
-		info->enabled = true;
-		break;
-
-	case MSM_DCVS_DISABLE_IDLE_PULSE:
-		info->enabled = false;
-		break;
-
-	case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
-		pm_qos_update_request(&qos_req, PM_QOS_DEFAULT_VALUE);
-		break;
-
-	case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
-		pm_qos_update_request(&qos_req, latency);
-		break;
-	}
-
-	return 0;
-}
-
-static int msm_cpuidle_notifier(struct notifier_block *self, unsigned long cmd,
-		void *v)
-{
-	struct cpu_idle_info *info =
-		&per_cpu(cpu_idle_info, smp_processor_id());
-	u64 io_wait_us = 0;
-	u64 prev_io_wait_us = 0;
-	u64 last_update_time = 0;
-	u64 val = 0;
-	uint32_t iowaited = 0;
-
-	if (!info->enabled)
-		return NOTIFY_OK;
-
-	switch (cmd) {
-	case CPU_PM_ENTER:
-		val = get_cpu_iowait_time_us(smp_processor_id(),
-					&last_update_time);
-		
-		if (val == (u64)-1)
-			val = 0;
-		per_cpu(iowait_on_cpu, smp_processor_id()) = val;
-		msm_dcvs_idle(info->handle, MSM_DCVS_IDLE_ENTER, 0);
-		break;
-
-	case CPU_PM_ENTER_FAILED:
-	case CPU_PM_EXIT:
-		prev_io_wait_us = per_cpu(iowait_on_cpu, smp_processor_id());
-		val = get_cpu_iowait_time_us(smp_processor_id(),
-				&last_update_time);
-		if (val == (u64)-1)
-			val = 0;
-		io_wait_us = val;
-		iowaited = (io_wait_us - prev_io_wait_us);
-		msm_dcvs_idle(info->handle, MSM_DCVS_IDLE_EXIT, iowaited);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block idle_nb = {
-	.notifier_call = msm_cpuidle_notifier,
-};
-
-static int msm_dcvs_idle_probe(struct platform_device *pdev)
-{
-	int cpu;
-	struct cpu_idle_info *info = NULL;
-	struct msm_dcvs_idle *inotify = NULL;
-
-	for_each_possible_cpu(cpu) {
-		info = &per_cpu(cpu_idle_info, cpu);
-		info->cpu = cpu;
-		inotify = &info->dcvs_notifier;
-		snprintf(core_name[cpu], 10, "cpu%d", cpu);
-		inotify->core_name = core_name[cpu];
-		inotify->enable = msm_dcvs_idle_notifier;
-		info->handle = msm_dcvs_idle_source_register(inotify);
-		BUG_ON(info->handle < 0);
-	}
-
-	latency = *((uint32_t *)pdev->dev.platform_data);
-	pm_qos_add_request(&qos_req, PM_QOS_CPU_DMA_LATENCY,
-				PM_QOS_DEFAULT_VALUE);
-
-	return cpu_pm_register_notifier(&idle_nb);
-}
-
-static int msm_dcvs_idle_remove(struct platform_device *pdev)
-{
-	int ret = 0;
-	int rc = 0;
-	int cpu = 0;
-	struct msm_dcvs_idle *inotify = NULL;
-	struct cpu_idle_info *info = NULL;
-
-	rc = cpu_pm_unregister_notifier(&idle_nb);
-
-	for_each_possible_cpu(cpu) {
-		info = &per_cpu(cpu_idle_info, cpu);
-		inotify = &info->dcvs_notifier;
-		ret = msm_dcvs_idle_source_unregister(inotify);
-		if (ret) {
-			rc = -EFAULT;
-			pr_err("Error de-registering core %d idle notifier.\n",
-					cpu);
-		}
-	}
-
-	return rc;
-}
-
-static struct platform_driver idle_pdrv = {
-	.probe = msm_dcvs_idle_probe,
-	.remove = __devexit_p(msm_dcvs_idle_remove),
-	.driver = {
-		.name  = "msm_cpu_idle",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int msm_dcvs_idle_init(void)
-{
-	return platform_driver_register(&idle_pdrv);
-}
-late_initcall(msm_dcvs_idle_init);
diff --git a/arch/arm/mach-msm/msm_dcvs_scm.c b/arch/arm/mach-msm/msm_dcvs_scm.c
index 573ffea..78d62ac 100644
--- a/arch/arm/mach-msm/msm_dcvs_scm.c
+++ b/arch/arm/mach-msm/msm_dcvs_scm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,18 +20,17 @@
 #include <mach/memory.h>
 #include <mach/scm.h>
 #include <mach/msm_dcvs_scm.h>
+#include <trace/events/mpdcvs_trace.h>
 
-#define DCVS_CMD_CREATE_GROUP		1
 #define DCVS_CMD_REGISTER_CORE		2
 #define DCVS_CMD_SET_ALGO_PARAM		3
 #define DCVS_CMD_EVENT			4
 #define DCVS_CMD_INIT			5
+#define DCVS_CMD_SET_POWER_PARAM	6
 
 struct scm_register_core {
 	uint32_t core_id;
-	uint32_t group_id;
 	phys_addr_t core_param_phy;
-	phys_addr_t freq_phy;
 };
 
 struct scm_algo {
@@ -44,13 +43,28 @@
 	uint32_t size;
 };
 
+struct scm_pwr_param {
+	uint32_t	core_id;
+	phys_addr_t	pwr_param_phy;
+	phys_addr_t	freq_phy;
+	phys_addr_t	coeffs_phy;
+};
+
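+/*
+ * DCVS and mpdecision algo params now share DCVS_CMD_SET_ALGO_PARAM; the
+ * type field tells TZ which member of the union is valid.
+ */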
+struct msm_algo_param {
+	enum msm_dcvs_algo_param_type		type;
+	union {
+		struct msm_dcvs_algo_param	dcvs_param;
+		struct msm_mpd_algo_param	mpd_param;
+	} u;
+};
+
 int msm_dcvs_scm_init(size_t size)
 {
 	int ret = 0;
 	struct scm_init init;
 	uint32_t p = 0;
 
-	
+	/* Allocate word aligned non-cacheable memory */
 	p = allocate_contiguous_ebi_nomap(size, 4);
 	if (!p)
 		return -ENOMEM;
@@ -61,7 +75,7 @@
 	ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_INIT,
 			&init, sizeof(init), NULL, 0);
 
-	
+	/* Not freed if the initialization succeeds */
 	if (ret)
 		free_contiguous_memory_by_paddr(p);
 
@@ -69,49 +83,25 @@
 }
 EXPORT_SYMBOL(msm_dcvs_scm_init);
 
-int msm_dcvs_scm_create_group(uint32_t id)
-{
-	int ret = 0;
-
-	ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_CREATE_GROUP,
-			&id, sizeof(uint32_t), NULL, 0);
-
-	return ret;
-}
-EXPORT_SYMBOL(msm_dcvs_scm_create_group);
-
-int msm_dcvs_scm_register_core(uint32_t core_id, uint32_t group_id,
-		struct msm_dcvs_core_param *param,
-		struct msm_dcvs_freq_entry *freq)
+int msm_dcvs_scm_register_core(uint32_t core_id,
+		struct msm_dcvs_core_param *param)
 {
 	int ret = 0;
 	struct scm_register_core reg_data;
 	struct msm_dcvs_core_param *p = NULL;
-	struct msm_dcvs_freq_entry *f = NULL;
 
 	p = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_core_param)), GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	f = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_freq_entry) *
-				param->num_freq), GFP_KERNEL);
-	if (!f) {
-		kfree(p);
-		return -ENOMEM;
-	}
-
 	memcpy(p, param, sizeof(struct msm_dcvs_core_param));
-	memcpy(f, freq, sizeof(struct msm_dcvs_freq_entry) * param->num_freq);
 
 	reg_data.core_id = core_id;
-	reg_data.group_id = group_id;
 	reg_data.core_param_phy = virt_to_phys(p);
-	reg_data.freq_phy = virt_to_phys(f);
 
 	ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_REGISTER_CORE,
 			&reg_data, sizeof(reg_data), NULL, 0);
 
-	kfree(f);
 	kfree(p);
 
 	return ret;
@@ -123,13 +113,14 @@
 {
 	int ret = 0;
 	struct scm_algo algo;
-	struct msm_dcvs_algo_param *p = NULL;
+	struct msm_algo_param *p = NULL;
 
-	p = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_algo_param)), GFP_KERNEL);
+	p = kzalloc(PAGE_ALIGN(sizeof(struct msm_algo_param)), GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, param, sizeof(struct msm_dcvs_algo_param));
+	p->type = MSM_DCVS_ALGO_DCVS_PARAM;
+	memcpy(&p->u.dcvs_param, param, sizeof(struct msm_dcvs_algo_param));
 
 	algo.core_id = core_id;
 	algo.algo_phy = virt_to_phys(p);
@@ -143,6 +134,85 @@
 }
 EXPORT_SYMBOL(msm_dcvs_scm_set_algo_params);
 
+int msm_mpd_scm_set_algo_params(struct msm_mpd_algo_param *param)
+{
+	int ret = 0;
+	struct scm_algo algo;
+	struct msm_algo_param *p = NULL;
+
+	p = kzalloc(PAGE_ALIGN(sizeof(struct msm_algo_param)), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	p->type = MSM_DCVS_ALGO_MPD_PARAM;
+	memcpy(&p->u.mpd_param, param, sizeof(struct msm_mpd_algo_param));
+
+	algo.core_id = 0;
+	algo.algo_phy = virt_to_phys(p);
+
+	ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_SET_ALGO_PARAM,
+			&algo, sizeof(algo), NULL, 0);
+
+	kfree(p);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_mpd_scm_set_algo_params);
+
+int msm_dcvs_scm_set_power_params(uint32_t core_id,
+		struct msm_dcvs_power_params *pwr_param,
+		struct msm_dcvs_freq_entry *freq_entry,
+		struct msm_dcvs_energy_curve_coeffs *coeffs)
+{
+	int ret = 0;
+	struct scm_pwr_param pwr;
+	struct msm_dcvs_power_params *pwrt = NULL;
+	struct msm_dcvs_freq_entry *freqt = NULL;
+	struct msm_dcvs_energy_curve_coeffs *coefft = NULL;
+
+	pwrt = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_power_params)),
+			GFP_KERNEL);
+	if (!pwrt)
+		return -ENOMEM;
+
+	freqt = kzalloc(PAGE_ALIGN(sizeof(struct msm_dcvs_freq_entry)
+				* pwr_param->num_freq),
+			GFP_KERNEL);
+	if (!freqt) {
+		kfree(pwrt);
+		return -ENOMEM;
+	}
+
+	coefft = kzalloc(PAGE_ALIGN(
+				sizeof(struct msm_dcvs_energy_curve_coeffs)),
+				GFP_KERNEL);
+	if (!coefft) {
+		kfree(pwrt);
+		kfree(freqt);
+		return -ENOMEM;
+	}
+
+	memcpy(pwrt, pwr_param, sizeof(struct msm_dcvs_power_params));
+	memcpy(freqt, freq_entry,
+			sizeof(struct msm_dcvs_freq_entry)*pwr_param->num_freq);
+	memcpy(coefft, coeffs, sizeof(struct msm_dcvs_energy_curve_coeffs));
+
+	pwr.core_id = core_id;
+	pwr.pwr_param_phy = virt_to_phys(pwrt);
+	pwr.freq_phy = virt_to_phys(freqt);
+	pwr.coeffs_phy = virt_to_phys(coefft);
+
+	ret = scm_call(SCM_SVC_DCVS, DCVS_CMD_SET_POWER_PARAM,
+			&pwr, sizeof(pwr), NULL, 0);
+
+	kfree(pwrt);
+	kfree(freqt);
+	kfree(coefft);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_dcvs_scm_set_power_params);
+
 int msm_dcvs_scm_event(uint32_t core_id,
 		enum msm_dcvs_scm_event event_id,
 		uint32_t param0, uint32_t param1,
@@ -156,6 +226,9 @@
 	ret = scm_call_atomic4_3(SCM_SVC_DCVS, DCVS_CMD_EVENT,
 			core_id, event_id, param0, param1, ret0, ret1);
 
+	trace_msm_dcvs_scm_event(core_id, (int)event_id, param0, param1,
+							*ret0, *ret1);
+
 	return ret;
 }
 EXPORT_SYMBOL(msm_dcvs_scm_event);
diff --git a/arch/arm/mach-msm/msm_mpdecision.c b/arch/arm/mach-msm/msm_mpdecision.c
new file mode 100644
index 0000000..52b9ec3
--- /dev/null
+++ b/arch/arm/mach-msm/msm_mpdecision.c
@@ -0,0 +1,726 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)     "mpd %s: " fmt, __func__
+
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/kobject.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpufreq.h>
+#include <linux/rq_stats.h>
+#include <asm/atomic.h>
+#include <asm/page.h>
+#include <mach/msm_dcvs.h>
+#include <mach/msm_dcvs_scm.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mpdcvs_trace.h>
+
+#define DEFAULT_RQ_AVG_POLL_MS    (1)
+#define DEFAULT_RQ_AVG_DIVIDE    (25)
+
+struct mpd_attrib {
+	struct kobj_attribute	enabled;
+	struct kobj_attribute	rq_avg_poll_ms;
+	struct kobj_attribute	iowait_threshold_pct;
+
+	struct kobj_attribute	rq_avg_divide;
+	struct kobj_attribute	em_win_size_min_us;
+	struct kobj_attribute	em_win_size_max_us;
+	struct kobj_attribute	em_max_util_pct;
+	struct kobj_attribute	mp_em_rounding_point_min;
+	struct kobj_attribute	mp_em_rounding_point_max;
+	struct kobj_attribute	online_util_pct_min;
+	struct kobj_attribute	online_util_pct_max;
+	struct kobj_attribute	slack_time_min_us;
+	struct kobj_attribute	slack_time_max_us;
+	struct kobj_attribute	hp_up_max_ms;
+	struct kobj_attribute	hp_up_ms;
+	struct kobj_attribute	hp_up_count;
+	struct kobj_attribute	hp_dw_max_ms;
+	struct kobj_attribute	hp_dw_ms;
+	struct kobj_attribute	hp_dw_count;
+	struct attribute_group	attrib_group;
+};
+
+struct msm_mpd_scm_data {
+	enum msm_dcvs_scm_event event;
+	int			nr;
+};
+
+struct mpdecision {
+	uint32_t			enabled;
+	atomic_t			algo_cpu_mask;
+	uint32_t			rq_avg_poll_ms;
+	uint32_t			iowait_threshold_pct;
+	uint32_t			rq_avg_divide;
+	ktime_t				next_update;
+	uint32_t			slack_us;
+	struct msm_mpd_algo_param	mp_param;
+	struct mpd_attrib		attrib;
+	struct mutex			lock;
+	struct task_struct		*task;
+	struct task_struct		*hptask;
+	struct hrtimer			slack_timer;
+	struct msm_mpd_scm_data		data;
+	int				hpupdate;
+	wait_queue_head_t		wait_q;
+	wait_queue_head_t		wait_hpq;
+};
+
+struct hp_latency {
+	int hp_up_max_ms;
+	int hp_up_ms;
+	int hp_up_count;
+	int hp_dw_max_ms;
+	int hp_dw_ms;
+	int hp_dw_count;
+};
+
+static DEFINE_PER_CPU(struct hrtimer, rq_avg_poll_timer);
+static DEFINE_SPINLOCK(rq_avg_lock);
+
+enum {
+	MSM_MPD_DEBUG_NOTIFIER = BIT(0),
+	MSM_MPD_CORE_STATUS = BIT(1),
+	MSM_MPD_SLACK_TIMER = BIT(2),
+};
+
+enum {
+	HPUPDATE_WAITING = 0, /* we are waiting for a cpumask update */
+	HPUPDATE_SCHEDULED = 1, /* a cpumask update has been scheduled */
+	HPUPDATE_IN_PROGRESS = 2, /* we are in the process of hotplugging */
+};
+
+static int msm_mpd_enabled = 1;
+module_param_named(enabled, msm_mpd_enabled, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static struct dentry *debugfs_base;
+static struct mpdecision msm_mpd;
+
+static struct hp_latency hp_latencies;
+
+static unsigned long last_nr;
+static int num_present_hundreds;
+static ktime_t last_down_time;
+
+static bool ok_to_update_tz(int nr, int last_nr)
+{
+	/*
+	 * Exclude unnecessary TZ reports if the run queue average hasn't
+	 * changed much from the last reported value. The division by
+	 * rq_avg_divide filters out small changes in the run queue average
+	 * which won't cause an online cpu mask change. Also report if the
+	 * online cpu count does not match the count requested by TZ and we
+	 * are not already bringing cpus online, as indicated by
+	 * HPUPDATE_IN_PROGRESS in msm_mpd.hpupdate.
+	 */
+	return ((nr / msm_mpd.rq_avg_divide)
+				!= (last_nr / msm_mpd.rq_avg_divide))
+		|| ((hweight32(atomic_read(&msm_mpd.algo_cpu_mask))
+				!= num_online_cpus())
+			&& (msm_mpd.hpupdate != HPUPDATE_IN_PROGRESS));
+}
+
+static enum hrtimer_restart msm_mpd_rq_avg_poll_timer(struct hrtimer *timer)
+{
+	int nr, nr_iowait;
+	ktime_t curr_time = ktime_get();
+	unsigned long flags;
+	int cpu = smp_processor_id();
+	enum hrtimer_restart restart = HRTIMER_RESTART;
+
+	spin_lock_irqsave(&rq_avg_lock, flags);
+	/* If running on the wrong cpu, don't restart */
+	if (&per_cpu(rq_avg_poll_timer, cpu) != timer)
+		restart = HRTIMER_NORESTART;
+
+	if (ktime_to_ns(ktime_sub(curr_time, msm_mpd.next_update)) < 0)
+		goto out;
+
+	msm_mpd.next_update = ktime_add_ns(curr_time,
+			(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
+
+	sched_get_nr_running_avg(&nr, &nr_iowait);
+
+	if ((nr_iowait >= msm_mpd.iowait_threshold_pct) && (nr < last_nr))
+		nr = last_nr;
+
+	if (nr > num_present_hundreds)
+		nr = num_present_hundreds;
+
+	trace_msm_mp_runq("nr_running", nr);
+
+	if (ok_to_update_tz(nr, last_nr)) {
+		hrtimer_try_to_cancel(&msm_mpd.slack_timer);
+		msm_mpd.data.nr = nr;
+		msm_mpd.data.event = MSM_DCVS_SCM_RUNQ_UPDATE;
+		wake_up(&msm_mpd.wait_q);
+		last_nr = nr;
+	}
+
+out:
+	/* set next expiration */
+	hrtimer_set_expires(timer, msm_mpd.next_update);
+	spin_unlock_irqrestore(&rq_avg_lock, flags);
+	return restart;
+}
+
+static void bring_up_cpu(int cpu)
+{
+	int cpu_action_time_ms;
+	int time_taken_ms;
+	int ret, ret1, ret2;
+
+	cpu_action_time_ms = ktime_to_ms(ktime_get());
+	ret = cpu_up(cpu);
+	if (ret) {
+		pr_debug("Error %d online core %d\n", ret, cpu);
+	} else {
+		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
+		if (time_taken_ms > hp_latencies.hp_up_max_ms)
+			hp_latencies.hp_up_max_ms = time_taken_ms;
+		hp_latencies.hp_up_ms += time_taken_ms;
+		hp_latencies.hp_up_count++;
+		ret = msm_dcvs_scm_event(
+				CPU_OFFSET + cpu,
+				MSM_DCVS_SCM_CORE_ONLINE,
+				cpufreq_get(cpu),
+				(uint32_t) time_taken_ms * USEC_PER_MSEC,
+				&ret1, &ret2);
+		if (ret)
+			pr_err("Error sending hotplug scm event err=%d\n", ret);
+	}
+}
+
+static void bring_down_cpu(int cpu)
+{
+	int cpu_action_time_ms;
+	int time_taken_ms;
+	int ret, ret1, ret2;
+
+	BUG_ON(cpu == 0);
+	cpu_action_time_ms = ktime_to_ms(ktime_get());
+	ret = cpu_down(cpu);
+	if (ret) {
+		pr_debug("Error %d offline" "core %d\n", ret, cpu);
+	} else {
+		time_taken_ms = ktime_to_ms(ktime_get()) - cpu_action_time_ms;
+		if (time_taken_ms > hp_latencies.hp_dw_max_ms)
+			hp_latencies.hp_dw_max_ms = time_taken_ms;
+		hp_latencies.hp_dw_ms += time_taken_ms;
+		hp_latencies.hp_dw_count++;
+		ret = msm_dcvs_scm_event(
+				CPU_OFFSET + cpu,
+				MSM_DCVS_SCM_CORE_OFFLINE,
+				(uint32_t) time_taken_ms * USEC_PER_MSEC,
+				0,
+				&ret1, &ret2);
+		if (ret)
+			pr_err("Error sending hotplug scm event err=%d\n", ret);
+	}
+}
+
+static int __ref msm_mpd_update_scm(enum msm_dcvs_scm_event event, int nr)
+{
+	int ret = 0;
+	uint32_t req_cpu_mask = 0;
+	uint32_t slack_us = 0;
+	uint32_t param0 = 0;
+
+	if (event == MSM_DCVS_SCM_RUNQ_UPDATE)
+		param0 = nr;
+
+	ret = msm_dcvs_scm_event(0, event, param0, 0,
+				 &req_cpu_mask, &slack_us);
+
+	if (ret) {
+		pr_err("Error (%d) sending event %d, param %d\n", ret, event,
+				param0);
+		return ret;
+	}
+
+	trace_msm_mp_cpusonline("cpu_online_mp", req_cpu_mask);
+	trace_msm_mp_slacktime("slack_time_mp", slack_us);
+	msm_mpd.slack_us = slack_us;
+	atomic_set(&msm_mpd.algo_cpu_mask, req_cpu_mask);
+	msm_mpd.hpupdate = HPUPDATE_SCHEDULED;
+	wake_up(&msm_mpd.wait_hpq);
+
+	/* Start MP Decision slack timer */
+	if (slack_us) {
+		hrtimer_cancel(&msm_mpd.slack_timer);
+		ret = hrtimer_start(&msm_mpd.slack_timer,
+				ktime_set(0, slack_us * NSEC_PER_USEC),
+				HRTIMER_MODE_REL_PINNED);
+		if (ret)
+			pr_err("Failed to register slack timer (%d) %d\n",
+					slack_us, ret);
+	}
+
+	return ret;
+}
+
+static enum hrtimer_restart msm_mpd_slack_timer(struct hrtimer *timer)
+{
+	unsigned long flags;
+
+	trace_printk("mpd:slack_timer_fired!\n");
+
+	spin_lock_irqsave(&rq_avg_lock, flags);
+	if (msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE)
+		goto out;
+
+	msm_mpd.data.nr = 0;
+	msm_mpd.data.event = MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED;
+	wake_up(&msm_mpd.wait_q);
+out:
+	spin_unlock_irqrestore(&rq_avg_lock, flags);
+	return HRTIMER_NORESTART;
+}
+
+static int msm_mpd_idle_notifier(struct notifier_block *self,
+				 unsigned long cmd, void *v)
+{
+	int cpu = smp_processor_id();
+	unsigned long flags;
+
+	switch (cmd) {
+	case CPU_PM_EXIT:
+		spin_lock_irqsave(&rq_avg_lock, flags);
+		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
+			      msm_mpd.next_update,
+			      HRTIMER_MODE_ABS_PINNED);
+		spin_unlock_irqrestore(&rq_avg_lock, flags);
+		break;
+	case CPU_PM_ENTER:
+		hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int msm_mpd_hotplug_notifier(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	unsigned long flags;
+
+	switch (action & (~CPU_TASKS_FROZEN)) {
+	case CPU_STARTING:
+		spin_lock_irqsave(&rq_avg_lock, flags);
+		hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
+			      msm_mpd.next_update,
+			      HRTIMER_MODE_ABS_PINNED);
+		spin_unlock_irqrestore(&rq_avg_lock, flags);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block msm_mpd_idle_nb = {
+	.notifier_call = msm_mpd_idle_notifier,
+};
+
+static struct notifier_block msm_mpd_hotplug_nb = {
+	.notifier_call = msm_mpd_hotplug_notifier,
+};
+
+static int __cpuinit msm_mpd_do_hotplug(void *data)
+{
+	int *event = (int *)data;
+	int cpu;
+
+	while (1) {
+		wait_event(msm_mpd.wait_hpq, *event || kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+
+		msm_mpd.hpupdate = HPUPDATE_IN_PROGRESS;
+		/*
+		 * Bring online any offline cores, then offline any online
+		 * cores.  Whenever a core is off/onlined restart the procedure
+		 * in case a new core is desired to be brought online in the
+		 * mean time.
+		 */
+restart:
+		for_each_possible_cpu(cpu) {
+			if ((atomic_read(&msm_mpd.algo_cpu_mask) & (1 << cpu))
+				&& !cpu_online(cpu)) {
+				bring_up_cpu(cpu);
+				if (cpu_online(cpu))
+					goto restart;
+			}
+		}
+
+		if (ktime_to_ns(ktime_sub(ktime_get(), last_down_time)) >
+		    100 * NSEC_PER_MSEC)
+			for_each_possible_cpu(cpu)
+				if (!(atomic_read(&msm_mpd.algo_cpu_mask) &
+				      (1 << cpu)) && cpu_online(cpu)) {
+					bring_down_cpu(cpu);
+					last_down_time = ktime_get();
+					break;
+				}
+		msm_mpd.hpupdate = HPUPDATE_WAITING;
+		msm_dcvs_apply_gpu_floor(0);
+		msm_dcvs_update_algo_params();
+	}
+
+	return 0;
+}
+
+static int msm_mpd_do_update_scm(void *data)
+{
+	struct msm_mpd_scm_data *scm_data = (struct msm_mpd_scm_data *)data;
+	unsigned long flags;
+	enum msm_dcvs_scm_event event;
+	int nr;
+
+	while (1) {
+		wait_event(msm_mpd.wait_q,
+			msm_mpd.data.event == MSM_DCVS_SCM_MPD_QOS_TIMER_EXPIRED
+			|| msm_mpd.data.event == MSM_DCVS_SCM_RUNQ_UPDATE
+			|| kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
+		spin_lock_irqsave(&rq_avg_lock, flags);
+		event = scm_data->event;
+		nr = scm_data->nr;
+		scm_data->event = 0;
+		scm_data->nr = 0;
+		spin_unlock_irqrestore(&rq_avg_lock, flags);
+
+		msm_mpd_update_scm(event, nr);
+	}
+	return 0;
+}
+
+static int __ref msm_mpd_set_enabled(uint32_t enable)
+{
+	int ret = 0;
+	int ret0 = 0;
+	int ret1 = 0;
+	int cpu;
+	static uint32_t last_enable;
+
+	enable = (enable > 0) ? 1 : 0;
+	if (last_enable == enable)
+		return ret;
+
+	if (enable) {
+		ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param);
+		if (ret) {
+			pr_err("Error(%d): msm_mpd_scm_set_algo_params failed\n",
+				ret);
+			return ret;
+		}
+	}
+
+	ret = msm_dcvs_scm_event(0, MSM_DCVS_SCM_MPD_ENABLE, enable, 0,
+			&ret0, &ret1);
+	if (ret) {
+		pr_err("Error(%d) %s MP Decision\n",
+				ret, (enable ? "enabling" : "disabling"));
+	} else {
+		last_enable = enable;
+		last_nr = 0;
+	}
+	if (enable) {
+		msm_mpd.next_update = ktime_add_ns(ktime_get(),
+				(msm_mpd.rq_avg_poll_ms * NSEC_PER_MSEC));
+		msm_mpd.task = kthread_run(msm_mpd_do_update_scm,
+					      &msm_mpd.data, "msm_mpdecision");
+		if (IS_ERR(msm_mpd.task))
+			return -EFAULT;
+
+		msm_mpd.hptask = kthread_run(msm_mpd_do_hotplug,
+						&msm_mpd.hpupdate, "msm_hp");
+		if (IS_ERR(msm_mpd.hptask))
+			return -EFAULT;
+
+		for_each_online_cpu(cpu)
+			hrtimer_start(&per_cpu(rq_avg_poll_timer, cpu),
+				      msm_mpd.next_update,
+				      HRTIMER_MODE_ABS_PINNED);
+		cpu_pm_register_notifier(&msm_mpd_idle_nb);
+		register_cpu_notifier(&msm_mpd_hotplug_nb);
+		msm_mpd.enabled = 1;
+	} else {
+		for_each_online_cpu(cpu)
+			hrtimer_cancel(&per_cpu(rq_avg_poll_timer, cpu));
+		kthread_stop(msm_mpd.hptask);
+		kthread_stop(msm_mpd.task);
+		cpu_pm_unregister_notifier(&msm_mpd_idle_nb);
+		unregister_cpu_notifier(&msm_mpd_hotplug_nb);
+		msm_mpd.enabled = 0;
+	}
+
+	return ret;
+}
+
+static int msm_mpd_set_rq_avg_poll_ms(uint32_t val)
+{
+	/*
+	 * No need to do anything. Just let the timer set its own next poll
+	 * interval when it next fires.
+	 */
+	msm_mpd.rq_avg_poll_ms = val;
+	return 0;
+}
+
+static int msm_mpd_set_iowait_threshold_pct(uint32_t val)
+{
+	/*
+	 * No need to do anything. Just let the timer set its own next poll
+	 * interval when it next fires.
+	 */
+	msm_mpd.iowait_threshold_pct = val;
+	return 0;
+}
+
+static int msm_mpd_set_rq_avg_divide(uint32_t val)
+{
+	/*
+	 * No need to do anything. New value will be used next time
+	 * the decision is made as to whether to update tz.
+	 */
+
+	if (val == 0)
+		return -EINVAL;
+
+	msm_mpd.rq_avg_divide = val;
+	return 0;
+}
+
+#define MPD_ALGO_PARAM(_name, _param) \
+static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
+			struct kobj_attribute *attr, char *buf) \
+{ \
+	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
+} \
+static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	uint32_t val; \
+	uint32_t old_val; \
+	mutex_lock(&msm_mpd.lock); \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) { \
+		pr_err("Invalid input %s for %s %d\n", \
+				buf, __stringify(_name), ret);\
+		mutex_unlock(&msm_mpd.lock); \
+		return ret; \
+	} \
+	old_val = _param; \
+	_param = val; \
+	ret = msm_mpd_scm_set_algo_params(&msm_mpd.mp_param); \
+	if (ret) { \
+		pr_err("Error %d returned when setting algo param %s to %d\n",\
+				ret, __stringify(_name), val); \
+		_param = old_val; \
+	} \
+	mutex_unlock(&msm_mpd.lock); \
+	return count; \
+}
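+
+/*
+ * For example, MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct)
+ * expands to msm_mpd_attr_em_max_util_pct_show()/_store(), which read and
+ * write msm_mpd.mp_param.em_max_util_pct and push the updated parameter set
+ * to TZ via msm_mpd_scm_set_algo_params().
+ */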
+
+#define MPD_PARAM(_name, _param) \
+static ssize_t msm_mpd_attr_##_name##_show(struct kobject *kobj, \
+			struct kobj_attribute *attr, char *buf) \
+{ \
+	return snprintf(buf, PAGE_SIZE, "%d\n", _param); \
+} \
+static ssize_t msm_mpd_attr_##_name##_store(struct kobject *kobj, \
+		struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+	int ret = 0; \
+	uint32_t val; \
+	uint32_t old_val; \
+	mutex_lock(&msm_mpd.lock); \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) { \
+		pr_err("Invalid input %s for %s %d\n", \
+				buf, __stringify(_name), ret);\
+		mutex_unlock(&msm_mpd.lock); \
+		return ret; \
+	} \
+	old_val = _param; \
+	ret = msm_mpd_set_##_name(val); \
+	if (ret) { \
+		pr_err("Error %d returned when setting algo param %s to %d\n",\
+				ret, __stringify(_name), val); \
+		_param = old_val; \
+	} \
+	mutex_unlock(&msm_mpd.lock); \
+	return count; \
+}
+
+#define MPD_RW_ATTRIB(i, _name) \
+	msm_mpd.attrib._name.attr.name = __stringify(_name); \
+	msm_mpd.attrib._name.attr.mode = S_IRUGO | S_IWUSR; \
+	msm_mpd.attrib._name.show = msm_mpd_attr_##_name##_show; \
+	msm_mpd.attrib._name.store = msm_mpd_attr_##_name##_store; \
+	msm_mpd.attrib.attrib_group.attrs[i] = &msm_mpd.attrib._name.attr;
+
+MPD_PARAM(enabled, msm_mpd.enabled);
+MPD_PARAM(rq_avg_poll_ms, msm_mpd.rq_avg_poll_ms);
+MPD_PARAM(iowait_threshold_pct, msm_mpd.iowait_threshold_pct);
+MPD_PARAM(rq_avg_divide, msm_mpd.rq_avg_divide);
+MPD_ALGO_PARAM(em_win_size_min_us, msm_mpd.mp_param.em_win_size_min_us);
+MPD_ALGO_PARAM(em_win_size_max_us, msm_mpd.mp_param.em_win_size_max_us);
+MPD_ALGO_PARAM(em_max_util_pct, msm_mpd.mp_param.em_max_util_pct);
+MPD_ALGO_PARAM(mp_em_rounding_point_min,
+				msm_mpd.mp_param.mp_em_rounding_point_min);
+MPD_ALGO_PARAM(mp_em_rounding_point_max,
+				msm_mpd.mp_param.mp_em_rounding_point_max);
+MPD_ALGO_PARAM(online_util_pct_min, msm_mpd.mp_param.online_util_pct_min);
+MPD_ALGO_PARAM(online_util_pct_max, msm_mpd.mp_param.online_util_pct_max);
+MPD_ALGO_PARAM(slack_time_min_us, msm_mpd.mp_param.slack_time_min_us);
+MPD_ALGO_PARAM(slack_time_max_us, msm_mpd.mp_param.slack_time_max_us);
+MPD_ALGO_PARAM(hp_up_max_ms, hp_latencies.hp_up_max_ms);
+MPD_ALGO_PARAM(hp_up_ms, hp_latencies.hp_up_ms);
+MPD_ALGO_PARAM(hp_up_count, hp_latencies.hp_up_count);
+MPD_ALGO_PARAM(hp_dw_max_ms, hp_latencies.hp_dw_max_ms);
+MPD_ALGO_PARAM(hp_dw_ms, hp_latencies.hp_dw_ms);
+MPD_ALGO_PARAM(hp_dw_count, hp_latencies.hp_dw_count);
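+
+/*
+ * The attribute files above are created on the module kobject in
+ * msm_mpd_probe(), so (assuming KBUILD_MODNAME resolves to
+ * "msm_mpdecision") they can be tuned from userspace, e.g.:
+ *
+ *	echo 40 > /sys/module/msm_mpdecision/online_util_pct_min
+ */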
+
+static int __devinit msm_mpd_probe(struct platform_device *pdev)
+{
+	struct kobject *module_kobj = NULL;
+	int ret = 0;
+	const int attr_count = 20;
+	struct msm_mpd_algo_param *param = NULL;
+
+	param = pdev->dev.platform_data;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto done;
+	}
+
+	msm_mpd.attrib.attrib_group.attrs =
+		kzalloc(attr_count * sizeof(struct attribute *), GFP_KERNEL);
+	if (!msm_mpd.attrib.attrib_group.attrs) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	MPD_RW_ATTRIB(0, enabled);
+	MPD_RW_ATTRIB(1, rq_avg_poll_ms);
+	MPD_RW_ATTRIB(2, iowait_threshold_pct);
+	MPD_RW_ATTRIB(3, rq_avg_divide);
+	MPD_RW_ATTRIB(4, em_win_size_min_us);
+	MPD_RW_ATTRIB(5, em_win_size_max_us);
+	MPD_RW_ATTRIB(6, em_max_util_pct);
+	MPD_RW_ATTRIB(7, mp_em_rounding_point_min);
+	MPD_RW_ATTRIB(8, mp_em_rounding_point_max);
+	MPD_RW_ATTRIB(9, online_util_pct_min);
+	MPD_RW_ATTRIB(10, online_util_pct_max);
+	MPD_RW_ATTRIB(11, slack_time_min_us);
+	MPD_RW_ATTRIB(12, slack_time_max_us);
+	MPD_RW_ATTRIB(13, hp_up_max_ms);
+	MPD_RW_ATTRIB(14, hp_up_ms);
+	MPD_RW_ATTRIB(15, hp_up_count);
+	MPD_RW_ATTRIB(16, hp_dw_max_ms);
+	MPD_RW_ATTRIB(17, hp_dw_ms);
+	MPD_RW_ATTRIB(18, hp_dw_count);
+
+	msm_mpd.attrib.attrib_group.attrs[19] = NULL;
+	ret = sysfs_create_group(module_kobj, &msm_mpd.attrib.attrib_group);
+	if (ret)
+		pr_err("Unable to create sysfs objects :%d\n", ret);
+
+	msm_mpd.rq_avg_poll_ms = DEFAULT_RQ_AVG_POLL_MS;
+	msm_mpd.rq_avg_divide = DEFAULT_RQ_AVG_DIVIDE;
+
+	memcpy(&msm_mpd.mp_param, param, sizeof(struct msm_mpd_algo_param));
+
+	debugfs_base = debugfs_create_dir("msm_mpdecision", NULL);
+	if (!debugfs_base) {
+		pr_err("Cannot create debugfs base msm_mpdecision\n");
+		ret = -ENOENT;
+		goto done;
+	}
+
+done:
+	if (ret && debugfs_base)
+		debugfs_remove(debugfs_base);
+
+	return ret;
+}
+
+static int __devexit msm_mpd_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver msm_mpd_driver = {
+	.probe	= msm_mpd_probe,
+	.remove	= __devexit_p(msm_mpd_remove),
+	.driver	= {
+		.name	= "msm_mpdecision",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init msm_mpdecision_init(void)
+{
+	int cpu;
+
+	if (!msm_mpd_enabled) {
+		pr_info("Not enabled\n");
+		return 0;
+	}
+
+	num_present_hundreds = 100 * num_present_cpus();
+
+	hrtimer_init(&msm_mpd.slack_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL_PINNED);
+	msm_mpd.slack_timer.function = msm_mpd_slack_timer;
+
+	for_each_possible_cpu(cpu) {
+		hrtimer_init(&per_cpu(rq_avg_poll_timer, cpu),
+			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+		per_cpu(rq_avg_poll_timer, cpu).function
+				= msm_mpd_rq_avg_poll_timer;
+	}
+	mutex_init(&msm_mpd.lock);
+	init_waitqueue_head(&msm_mpd.wait_q);
+	init_waitqueue_head(&msm_mpd.wait_hpq);
+	return platform_driver_register(&msm_mpd_driver);
+}
+late_initcall(msm_mpdecision_init);
diff --git a/drivers/cpufreq/cpufreq_gov_msm.c b/drivers/cpufreq/cpufreq_gov_msm.c
index 3fd55b6..8f086aa 100644
--- a/drivers/cpufreq/cpufreq_gov_msm.c
+++ b/drivers/cpufreq/cpufreq_gov_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,20 +18,114 @@
 #include <linux/kobject.h>
 #include <linux/cpufreq.h>
 #include <linux/platform_device.h>
+#include <linux/cpu_pm.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
 #include <mach/msm_dcvs.h>
 
+struct cpu_idle_info {
+	int			enabled;
+	int			dcvs_core_id;
+	struct pm_qos_request	pm_qos_req;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_idle_info, cpu_idle_info);
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, iowait_on_cpu);
+static uint32_t latency;
+
+static int msm_dcvs_idle_notifier(int core_num,
+		enum msm_core_control_event event)
+{
+	struct cpu_idle_info *info = &per_cpu(cpu_idle_info, core_num);
+
+	switch (event) {
+	case MSM_DCVS_ENABLE_IDLE_PULSE:
+		info->enabled = true;
+		break;
+
+	case MSM_DCVS_DISABLE_IDLE_PULSE:
+		info->enabled = false;
+		break;
+
+	case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
+		pm_qos_update_request(&info->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+		break;
+
+	case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
+		pm_qos_update_request(&info->pm_qos_req, latency);
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_cpuidle_notifier(struct notifier_block *self, unsigned long cmd,
+		void *v)
+{
+	struct cpu_idle_info *info =
+		&per_cpu(cpu_idle_info, smp_processor_id());
+	u64 io_wait_us = 0;
+	u64 prev_io_wait_us = 0;
+	u64 last_update_time = 0;
+	u64 val = 0;
+	uint32_t iowaited = 0;
+
+	if (!info->enabled)
+		return NOTIFY_OK;
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		val = get_cpu_iowait_time_us(smp_processor_id(),
+					&last_update_time);
+		/* val could be -1 when NOHZ is not enabled */
+		if (val == (u64)-1)
+			val = 0;
+		per_cpu(iowait_on_cpu, smp_processor_id()) = val;
+		msm_dcvs_idle(info->dcvs_core_id, MSM_DCVS_IDLE_ENTER, 0);
+		break;
+
+	case CPU_PM_EXIT:
+		prev_io_wait_us = per_cpu(iowait_on_cpu, smp_processor_id());
+		val = get_cpu_iowait_time_us(smp_processor_id(),
+				&last_update_time);
+		if (val == (u64)-1)
+			val = 0;
+		io_wait_us = val;
+		iowaited = (io_wait_us - prev_io_wait_us);
+		msm_dcvs_idle(info->dcvs_core_id, MSM_DCVS_IDLE_EXIT, iowaited);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block idle_nb = {
+	.notifier_call = msm_cpuidle_notifier,
+};
+
+static void msm_gov_idle_source_init(int cpu, int dcvs_core_id)
+{
+	struct cpu_idle_info *info = NULL;
+
+	info = &per_cpu(cpu_idle_info, cpu);
+	info->dcvs_core_id = dcvs_core_id;
+
+	pm_qos_add_request(&info->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+}
+
 struct msm_gov {
-	int cpu;
-	unsigned int cur_freq;
-	unsigned int min_freq;
-	unsigned int max_freq;
-	struct msm_dcvs_freq gov_notifier;
-	struct cpufreq_policy *policy;
+	int			cpu;
+	unsigned int		cur_freq;
+	unsigned int		min_freq;
+	unsigned int		max_freq;
+	struct cpufreq_policy	*policy;
+	int			dcvs_core_id;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct mutex, gov_mutex);
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_gov, msm_gov_info);
-static char core_name[NR_CPUS][10];
 
 static void msm_gov_check_limits(struct cpufreq_policy *policy)
 {
@@ -40,7 +134,7 @@
 	if (policy->max < gov->cur_freq)
 		__cpufreq_driver_target(policy, policy->max,
 				CPUFREQ_RELATION_H);
-	else if (policy->min > gov->min_freq)
+	else if (policy->min > gov->cur_freq)
 		__cpufreq_driver_target(policy, policy->min,
 				CPUFREQ_RELATION_L);
 	else
@@ -50,14 +144,14 @@
 	gov->cur_freq = policy->cur;
 	gov->min_freq = policy->min;
 	gov->max_freq = policy->max;
+	msm_dcvs_update_limits(gov->dcvs_core_id);
 }
 
-static int msm_dcvs_freq_set(struct msm_dcvs_freq *self,
+static int msm_dcvs_freq_set(int core_num,
 		unsigned int freq)
 {
 	int ret = -EINVAL;
-	struct msm_gov *gov =
-		container_of(self, struct msm_gov, gov_notifier);
+	struct msm_gov *gov = &per_cpu(msm_gov_info, core_num);
 
 	mutex_lock(&per_cpu(gov_mutex, gov->cpu));
 
@@ -66,23 +160,30 @@
 	if (freq > gov->max_freq)
 		freq = gov->max_freq;
 
-	ret = __cpufreq_driver_target(gov->policy, freq, CPUFREQ_RELATION_L);
-	gov->cur_freq = gov->policy->cur;
-
 	mutex_unlock(&per_cpu(gov_mutex, gov->cpu));
 
-	if (!ret)
-		return gov->cur_freq;
+	ret = cpufreq_driver_target(gov->policy, freq, CPUFREQ_RELATION_L);
+
+	if (!ret) {
+		gov->cur_freq = cpufreq_quick_get(gov->cpu);
+		if (freq != gov->cur_freq)
+			pr_err("cpu %d freq %u gov->cur_freq %u didn't match",
+						gov->cpu, freq, gov->cur_freq);
+	}
+	ret = gov->cur_freq;
 
 	return ret;
 }
 
-static unsigned int msm_dcvs_freq_get(struct msm_dcvs_freq *self)
+static unsigned int msm_dcvs_freq_get(int core_num)
 {
-	struct msm_gov *gov =
-		container_of(self, struct msm_gov, gov_notifier);
-
-	return gov->cur_freq;
+	struct msm_gov *gov = &per_cpu(msm_gov_info, core_num);
+	/*
+	 * The rw_sem in cpufreq is always held when this is called, so
+	 * policy->cur won't be updated underneath us and it is safe to
+	 * access policy->cur.
+	 */
+	return gov->policy->cur;
 }
 
 static int cpufreq_governor_msm(struct cpufreq_policy *policy,
@@ -92,8 +193,6 @@
 	int ret = 0;
 	int handle = 0;
 	struct msm_gov *gov = &per_cpu(msm_gov_info, policy->cpu);
-	struct msm_dcvs_freq *dcvs_notifier =
-			&(per_cpu(msm_gov_info, cpu).gov_notifier);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -103,19 +202,14 @@
 		mutex_lock(&per_cpu(gov_mutex, cpu));
 		per_cpu(msm_gov_info, cpu).cpu = cpu;
 		gov->policy = policy;
-		dcvs_notifier->core_name = core_name[cpu];
-		dcvs_notifier->set_frequency = msm_dcvs_freq_set;
-		dcvs_notifier->get_frequency = msm_dcvs_freq_get;
-		handle = msm_dcvs_freq_sink_register(dcvs_notifier);
+		handle = msm_dcvs_freq_sink_start(gov->dcvs_core_id);
 		BUG_ON(handle < 0);
 		msm_gov_check_limits(policy);
 		mutex_unlock(&per_cpu(gov_mutex, cpu));
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&per_cpu(gov_mutex, cpu));
-		msm_dcvs_freq_sink_unregister(dcvs_notifier);
-		mutex_unlock(&per_cpu(gov_mutex, cpu));
+		msm_dcvs_freq_sink_stop(gov->dcvs_core_id);
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
@@ -136,21 +230,41 @@
 
 static int __devinit msm_gov_probe(struct platform_device *pdev)
 {
-	int ret = 0;
 	int cpu;
-	uint32_t group_id = 0x43505530; 
-	struct msm_dcvs_core_info *core = NULL;
+	struct msm_dcvs_core_info *core_info = NULL;
+	struct msm_gov_platform_data *pdata = pdev->dev.platform_data;
+	int sensor = 0;
 
-	core = pdev->dev.platform_data;
+	core_info = pdata->info;
+	latency = pdata->latency;
 
 	for_each_possible_cpu(cpu) {
+		struct msm_gov *gov = &per_cpu(msm_gov_info, cpu);
+
 		mutex_init(&per_cpu(gov_mutex, cpu));
-		snprintf(core_name[cpu], 10, "cpu%d", cpu);
-		ret = msm_dcvs_register_core(core_name[cpu], group_id, core);
-		if (ret)
+		if (cpu < core_info->num_cores)
+			sensor = core_info->sensors[cpu];
+		gov->dcvs_core_id = msm_dcvs_register_core(
+						MSM_DCVS_CORE_TYPE_CPU,
+						cpu,
+						core_info,
+						msm_dcvs_freq_set,
+						msm_dcvs_freq_get,
+						msm_dcvs_idle_notifier,
+						NULL,
+						sensor);
+		if (gov->dcvs_core_id < 0) {
 			pr_err("Unable to register core for %d\n", cpu);
+			return -EINVAL;
+		}
+
+		msm_gov_idle_source_init(cpu, gov->dcvs_core_id);
 	}
 
+	cpu_pm_register_notifier(&idle_nb);
+
 	return cpufreq_register_governor(&cpufreq_gov_msm);
 }
 
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 017b756..41ff28d 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -121,6 +121,7 @@
 				unsigned int permission_type, int version,
 				void *data);
 
+#if 0
 static int allocate_heap_memory(struct ion_heap *heap)
 {
 	struct device *dev = heap->priv;
@@ -169,7 +170,7 @@
 out:
 	return ION_CP_ALLOCATE_FAIL;
 }
-
+#endif
 static void free_heap_memory(struct ion_heap *heap)
 {
 	struct device *dev = heap->priv;
@@ -196,6 +197,7 @@
 	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
 }
 
+#if 0
 static int ion_on_first_alloc(struct ion_heap *heap)
 {
 	struct ion_cp_heap *cp_heap =
@@ -215,6 +217,7 @@
 	}
 	return 0;
 }
+#endif
 
 static void ion_on_last_free(struct ion_heap *heap)
 {
@@ -243,6 +246,31 @@
 
 	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
 		/* Make sure we are in C state when the heap is protected. */
+		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+			ret_value = fmem_set_state(FMEM_C_STATE);
+			if (ret_value)
+				goto out;
+		}
+
+		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
+				cp_heap->secure_size, cp_heap->permission_type,
+				version, data);
+		if (ret_value) {
+			pr_err("Failed to protect memory for heap %s - "
+				"error code: %d\n", heap->name, ret_value);
+
+			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+				if (fmem_set_state(FMEM_T_STATE) != 0)
+					pr_err("%s: unable to transition heap to T-state\n",
+						__func__);
+			}
+			atomic_dec(&cp_heap->protect_cnt);
+		} else {
+			cp_heap->heap_protected = HEAP_PROTECTED;
+			pr_debug("Protected heap %s @ 0x%lx\n",
+				heap->name, cp_heap->base);
+		}
+#if 0
 		if (!cp_heap->allocated_bytes)
 			if (ion_on_first_alloc(heap))
 				goto out;
@@ -263,6 +291,7 @@
 			pr_debug("Protected heap %s @ 0x%lx\n",
 				heap->name, cp_heap->base);
 		}
+#endif
 	}
 out:
 	pr_debug("%s: protect count is %d\n", __func__,
@@ -340,6 +369,14 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
+	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
+		if (fmem_set_state(FMEM_C_STATE) != 0) {
+			mutex_unlock(&cp_heap->lock);
+			return ION_RESERVED_ALLOCATE_FAIL;
+		}
+	}
+
+#if 0
 	/*
 	 * if this is the first reusable allocation, transition
 	 * the heap
@@ -350,6 +387,7 @@
 			return ION_RESERVED_ALLOCATE_FAIL;
 		}
 
+#endif
 	cp_heap->allocated_bytes += size;
 	mutex_unlock(&cp_heap->lock);
 
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 31a7858..d53a368 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -137,7 +137,7 @@
 	if (IS_ERR_OR_NULL(region->handle))
 		goto out1;
 	if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
-				  SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0)
+				  SZ_4K, 0, &paddr, &len, 0, 0) < 0)
 		goto out2;
 #elif CONFIG_ANDROID_PMEM
 	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
diff --git a/drivers/media/video/videobuf2-msm-mem.c b/drivers/media/video/videobuf2-msm-mem.c
index 56abec8..706513f 100644
--- a/drivers/media/video/videobuf2-msm-mem.c
+++ b/drivers/media/video/videobuf2-msm-mem.c
@@ -57,7 +57,7 @@
 		goto client_failed;
 	}
 	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
-		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
+		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID), 0);
 	if (IS_ERR((void *)mem->ion_handle)) {
 		pr_err("%s Could not allocate\n", __func__);
 		goto alloc_failed;
@@ -65,7 +65,7 @@
 	rc = ion_map_iommu(mem->client, mem->ion_handle,
 			CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0,
 			(unsigned long *)&phyaddr,
-			(unsigned long *)&len, UNCACHED, 0);
+			(unsigned long *)&len, 0, 0);
 	if (rc < 0) {
 		pr_err("%s Could not get physical address\n", __func__);
 		goto phys_failed;
@@ -182,7 +182,7 @@
 		return PTR_ERR(mem->ion_handle);
 	}
 	rc = ion_map_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL,
-		SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, UNCACHED, 0);
+		SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, 0, 0);
 	if (rc < 0)
 		ion_free(client, mem->ion_handle);
 #elif CONFIG_ANDROID_PMEM
diff --git a/drivers/usb/gadget/f_projector.c b/drivers/usb/gadget/f_projector.c
index 826af22..991362a 100644
--- a/drivers/usb/gadget/f_projector.c
+++ b/drivers/usb/gadget/f_projector.c
@@ -425,7 +425,7 @@
 	input_sync(kdev);
 }
 
-extern char *get_fb_addr(void);
+char *get_fb_addr(void) { return NULL; }
 
 static void send_fb(struct projector_dev *dev)
 {
diff --git a/drivers/video/msm/mdp4_wfd_writeback_panel.c b/drivers/video/msm/mdp4_wfd_writeback_panel.c
index c3d0431..b9cab5f 100644
--- a/drivers/video/msm/mdp4_wfd_writeback_panel.c
+++ b/drivers/video/msm/mdp4_wfd_writeback_panel.c
@@ -80,4 +80,4 @@
 	return rc;
 }
 
-module_init(writeback_panel_init);
+late_initcall(writeback_panel_init);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 313f514..95b15d6 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -25,13 +25,14 @@
 
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
-#define IOMMU_CACHE	(4) 
+#define IOMMU_CACHE	(4) /* DMA cache coherency */
 
 struct iommu_ops;
 struct bus_type;
 struct device;
 struct iommu_domain;
 
+/* iommu fault flags */
 #define IOMMU_FAULT_READ	0x0
 #define IOMMU_FAULT_WRITE	0x1
 
@@ -45,10 +46,23 @@
 };
 
 #define IOMMU_CAP_CACHE_COHERENCY	0x1
-#define IOMMU_CAP_INTR_REMAP		0x2	
+#define IOMMU_CAP_INTR_REMAP		0x2	/* isolates device intrs */
 
 #ifdef CONFIG_IOMMU_API
 
+/**
+ * struct iommu_ops - iommu ops and capabilities
+ * @domain_init: init iommu domain
+ * @domain_destroy: destroy iommu domain
+ * @attach_dev: attach device to an iommu domain
+ * @detach_dev: detach device from an iommu domain
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @iova_to_phys: translate iova to physical address
+ * @domain_has_cap: domain capabilities query
+ * @commit: commit iommu domain
+ * @pgsize_bitmap: bitmap of supported page sizes
+ */
 struct iommu_ops {
 	int (*domain_init)(struct iommu_domain *domain, int flags);
 	void (*domain_destroy)(struct iommu_domain *domain);
@@ -96,18 +110,46 @@
 					iommu_fault_handler_t handler);
 extern int iommu_device_group(struct device *dev, unsigned int *groupid);
 
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - if restarting of the faulting device is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
 static inline int report_iommu_fault(struct iommu_domain *domain,
 		struct device *dev, unsigned long iova, int flags)
 {
 	int ret = -ENOSYS;
 
+	/*
+	 * if upper layers showed interest and installed a fault handler,
+	 * invoke it.
+	 */
 	if (domain->handler)
 		ret = domain->handler(domain, dev, iova, flags);
 
 	return ret;
 }
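+
+/*
+ * Illustrative call from a low-level IOMMU driver's fault path (domain,
+ * dev and iova here are hypothetical names taken from the driver's own
+ * fault context):
+ *
+ *	if (report_iommu_fault(domain, dev, iova, IOMMU_FAULT_WRITE))
+ *		pr_err("unhandled iommu fault at iova 0x%lx\n", iova);
+ */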
 
-#else 
+#else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
 
@@ -188,6 +230,6 @@
 	return -ENODEV;
 }
 
-#endif 
+#endif /* CONFIG_IOMMU_API */
 
-#endif 
+#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 0000000..8aa758d
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2012 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_IOPOLL_H
+#define _LINUX_IOPOLL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <asm-generic/errno.h>
+#include <asm/io.h>
+
+/**
+ * readl_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in uS (0 tight-loops)
+ * @timeout_us: Timeout in uS, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+	unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us); \
+	might_sleep_if(timeout_us); \
+	for (;;) { \
+		(val) = readl(addr); \
+		if ((cond) || (timeout_us && time_after(jiffies, timeout))) \
+			break; \
+		if (sleep_us) \
+			usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+	} \
+	(cond) ? 0 : -ETIMEDOUT; \
+})
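+
+/*
+ * Usage sketch (base, STATUS_REG and STATUS_READY are hypothetical, not
+ * defined by this header): sleep up to 100uS between reads, give up
+ * after 10mS:
+ *
+ *	u32 status;
+ *	int ret;
+ *
+ *	ret = readl_poll_timeout(base + STATUS_REG, status,
+ *				 status & STATUS_READY, 100, 10000);
+ *	if (ret)
+ *		pr_err("device not ready, last status 0x%x\n", status);
+ */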
+
+/**
+ * readl_poll_timeout_noirq - Periodically poll an address until a condition is met or a timeout occurs
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @max_reads: Maximum number of reads before giving up
+ * @time_between_us: Time to udelay() between successive reads
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout.
+ */
+#define readl_poll_timeout_noirq(addr, val, cond, max_reads, time_between_us) \
+({ \
+	int count; \
+	for (count = (max_reads); count > 0; count--) { \
+		(val) = readl(addr); \
+		if (cond) \
+			break; \
+		udelay(time_between_us); \
+	} \
+	(cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
+ * readl_poll - Periodically poll an address until a condition is met
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in uS (0 tight-loops)
+ *
+ * Must not be called from atomic context if sleep_us is used.
+ */
+#define readl_poll(addr, val, cond, sleep_us) \
+	readl_poll_timeout(addr, val, cond, sleep_us, 0)
+
+/**
+ * readl_tight_poll_timeout - Tight-loop on an address until a condition is met or a timeout occurs
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @timeout_us: Timeout in uS, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if timeout_us is used.
+ */
+#define readl_tight_poll_timeout(addr, val, cond, timeout_us) \
+	readl_poll_timeout(addr, val, cond, 0, timeout_us)
+
+/**
+ * readl_tight_poll - Tight-loop on an address until a condition is met
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ *
+ * May be called from atomic context.
+ */
+#define readl_tight_poll(addr, val, cond) \
+	readl_poll_timeout(addr, val, cond, 0, 0)
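+
+/*
+ * Usage sketch (hw_base, ACK_REG and ACK_DONE are hypothetical): spin in
+ * atomic context, with no timeout, until the hardware acknowledges:
+ *
+ *	u32 ack;
+ *
+ *	readl_tight_poll(hw_base + ACK_REG, ack, ack & ACK_DONE);
+ */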
+
+#endif /* _LINUX_IOPOLL_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 562312a..f070d7c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,6 +118,7 @@
 extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg);
 
 extern void calc_global_load(unsigned long ticks);
 
diff --git a/include/linux/slimport.h b/include/linux/slimport.h
new file mode 100644
index 0000000..f55fd2c
--- /dev/null
+++ b/include/linux/slimport.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright(c) 2012, LG Electronics Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SLIMPORT_H
+#define __SLIMPORT_H
+
+
+#define SSC_EN
+#define HDCP_EN
+
+#if 0
+#define SSC_1
+#define EYE_TEST
+#define EDID_DEBUG_PRINT
+#endif
+
+#define AUX_ERR  1
+#define AUX_OK   0
+
+extern unchar sp_tx_hw_lt_done;
+extern bool sp_tx_hw_lt_enable;
+extern unchar sp_tx_link_config_done;
+extern enum SP_TX_System_State sp_tx_system_state;
+extern unchar sp_tx_rx_anx7730;
+extern unchar sp_tx_pd_mode;
+extern unchar sp_tx_rx_mydp;
+
+extern unchar bedid_break;
+
+int sp_read_reg(uint8_t slave_addr, uint8_t offset, uint8_t *buf);
+int sp_write_reg(uint8_t slave_addr, uint8_t offset, uint8_t value);
+void sp_tx_hardware_poweron(void);
+void sp_tx_hardware_powerdown(void);
+int slimport_read_edid_block(int block, uint8_t *edid_buf);
+
+#ifdef CONFIG_SLIMPORT_ANX7808
+bool slimport_is_connected(void);
+unchar sp_get_link_bw(void);
+void sp_set_link_bw(unchar link_bw);
+#else
+static inline bool slimport_is_connected(void)
+{
+	return false;
+}
+static inline unchar sp_get_link_bw(void)
+{
+	return 0;
+}
+static inline void sp_set_link_bw(unchar link_bw)
+{
+	return;
+}
+#endif
+#endif
diff --git a/include/trace/events/mpdcvs_trace.h b/include/trace/events/mpdcvs_trace.h
new file mode 100644
index 0000000..0db1378
--- /dev/null
+++ b/include/trace/events/mpdcvs_trace.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM  mpdcvs_trace
+
+#if !defined(_TRACE_MPDCVS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPDCVS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(msm_mp,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, mp_val)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->mp_val = mp_val;
+	),
+
+	TP_printk("ev_name=%s ev_level=%d",
+		__get_str(name),
+		__entry->mp_val)
+);
+
+/* Core function of run_q */
+
+DEFINE_EVENT(msm_mp, msm_mp_runq,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DEFINE_EVENT(msm_mp, msm_mp_cpusonline,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DEFINE_EVENT(msm_mp, msm_mp_slacktime,
+
+	TP_PROTO(const char *name, int mp_val),
+
+	TP_ARGS(name, mp_val)
+);
+
+DECLARE_EVENT_CLASS(msm_dcvs,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__string(cpuid, cpuid)
+		__field(int, val)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__assign_str(cpuid, cpuid);
+		__entry->val = val;
+	),
+
+	TP_printk("ev_name=%s d_name=%s ev_level=%d",
+		__get_str(name),
+		__get_str(cpuid),
+		__entry->val)
+);
+
+/* Core function of dcvs */
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_idle,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_iowait,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DEFINE_EVENT(msm_dcvs, msm_dcvs_slack_time,
+
+	TP_PROTO(const char *name, const char *cpuid, int val),
+
+	TP_ARGS(name, cpuid, val)
+);
+
+DECLARE_EVENT_CLASS(msm_dcvs_scm,
+
+	TP_PROTO(unsigned long cpuid, int ev_type, unsigned long param0,
+		unsigned long param1, unsigned long ret0, unsigned long ret1),
+
+	TP_ARGS(cpuid, ev_type, param0, param1, ret0, ret1),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, cpuid)
+		__field(int, ev_type)
+		__field(unsigned long, param0)
+		__field(unsigned long, param1)
+		__field(unsigned long, ret0)
+		__field(unsigned long, ret1)
+	),
+
+	TP_fast_assign(
+		__entry->cpuid = cpuid;
+		__entry->ev_type = ev_type;
+		__entry->param0 = param0;
+		__entry->param1 = param1;
+		__entry->ret0 = ret0;
+		__entry->ret1 = ret1;
+	),
+
+	TP_printk("dev=%lu ev_type=%d ev_param0=%lu ev_param1=%lu ev_ret0=%lu ev_ret1=%lu",
+		__entry->cpuid,
+		__entry->ev_type,
+		__entry->param0,
+		__entry->param1,
+		__entry->ret0,
+		__entry->ret1)
+);
+
+DEFINE_EVENT(msm_dcvs_scm, msm_dcvs_scm_event,
+
+	TP_PROTO(unsigned long cpuid, int ev_type, unsigned long param0,
+		unsigned long param1, unsigned long ret0, unsigned long ret1),
+
+	TP_ARGS(cpuid, ev_type, param0, param1, ret0, ret1)
+);
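+
+/*
+ * This event is emitted from msm_dcvs_scm_event() in msm_dcvs_scm.c:
+ *
+ *	trace_msm_dcvs_scm_event(core_id, (int)event_id, param0, param1,
+ *							*ret0, *ret1);
+ */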
+
+#endif /* _TRACE_MPDCVS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 9a7dd35..3ede7d9 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -11,7 +11,7 @@
 CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
-obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o
+obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o sched_avg.o
 obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
new file mode 100644
index 0000000..8eaf2f7
--- /dev/null
+++ b/kernel/sched/sched_avg.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Scheduler hook for average runqueue determination
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+
+static DEFINE_PER_CPU(u64, nr_prod_sum);
+static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
+static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
+static s64 last_get_time;
+
+/**
+ * sched_get_nr_running_avg
+ * @avg: Average nr_running since the last poll, scaled by 100 to give
+ *	 up to two decimal points of accuracy.
+ * @iowait_avg: Average nr_iowait since the last poll, scaled the same way.
+ *
+ * Obtains the average nr_running value since the last poll.
+ * This function may not be called concurrently with itself.
+ */
+void sched_get_nr_running_avg(int *avg, int *iowait_avg)
+{
+	int cpu;
+	u64 curr_time = sched_clock();
+	u64 diff = curr_time - last_get_time;
+	u64 tmp_avg = 0, tmp_iowait = 0;
+
+	*avg = 0;
+	*iowait_avg = 0;
+
+	if (!diff)
+		return;
+
+	last_get_time = curr_time;
+	/* read and reset nr_running counts */
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+		tmp_avg += per_cpu(nr_prod_sum, cpu);
+		tmp_avg += per_cpu(nr, cpu) *
+			(curr_time - per_cpu(last_time, cpu));
+		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
+		tmp_iowait += nr_iowait_cpu(cpu) *
+			(curr_time - per_cpu(last_time, cpu));
+		per_cpu(last_time, cpu) = curr_time;
+		per_cpu(nr_prod_sum, cpu) = 0;
+		per_cpu(iowait_prod_sum, cpu) = 0;
+		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+	}
+
+	*avg = (int)div64_u64(tmp_avg * 100, diff);
+	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+
+	BUG_ON(*avg < 0);
+	pr_debug("%s - avg:%d\n", __func__, *avg);
+	BUG_ON(*iowait_avg < 0);
+	pr_debug("%s - iowait_avg:%d\n", __func__, *iowait_avg);
+}
+EXPORT_SYMBOL(sched_get_nr_running_avg);
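+
+/*
+ * Illustrative caller (cf. the mpdecision rq-average poll timer): the
+ * returned values are scaled by 100, so nr == 250 means an average of
+ * 2.5 runnable tasks since the previous call:
+ *
+ *	int nr, nr_iowait;
+ *
+ *	sched_get_nr_running_avg(&nr, &nr_iowait);
+ */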
+
+/**
+ * sched_update_nr_prod
+ * @cpu: The core id of the nr running driver.
+ * @nr_running: Updated nr_running value for cpu.
+ * @inc: Whether we are increasing or decreasing the count
+ * @return: N/A
+ *
+ * Update average with latest nr_running value for CPU
+ */
+void sched_update_nr_prod(int cpu, unsigned long nr_running, bool inc)
+{
+	int diff;
+	s64 curr_time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+	curr_time = sched_clock();
+	diff = curr_time - per_cpu(last_time, cpu);
+	per_cpu(last_time, cpu) = curr_time;
+	per_cpu(nr, cpu) = nr_running + (inc ? 1 : -1);
+
+	BUG_ON((s64)per_cpu(nr, cpu) < 0);
+
+	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
+	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+}
+EXPORT_SYMBOL(sched_update_nr_prod);
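+
+/*
+ * Expected to be called from the scheduler's enqueue/dequeue paths with
+ * the pre-update nr_running value; a sketch (the exact hook sites are
+ * outside this file):
+ *
+ *	sched_update_nr_prod(cpu_of(rq), rq->nr_running, true);
+ */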
diff --git a/scripts/gcc-wrapper.py b/scripts/gcc-wrapper.py
index 583a5ce..991bf31 100755
--- a/scripts/gcc-wrapper.py
+++ b/scripts/gcc-wrapper.py
@@ -80,7 +80,7 @@
         proc = subprocess.Popen(args, stderr=subprocess.PIPE)
         for line in proc.stderr:
             print line,
-            interpret_warning(line)
+#            interpret_warning(line)
 
         result = proc.wait()
     except OSError as e: