Port the 3.0 kernel power source in pieces:
 source.c
 main.c

This piece reworks main.c: drop the touch_event/touch_event_timer sysfs
attributes and the debugfs suspend_stats interface, restore the 3.0
behaviour of pm_notifier_call_chain(), pm_test_store() and state_store(),
and add the CONFIG_SEC_DVFS cpufreq_min_limit, cpufreq_max_limit and
cpufreq_table sysfs attributes together with set_freq_limit().

Change-Id: Ifa25fab256d8bff11bede4e35236da5857e39d78
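
For context, a minimal sketch of how a kernel-side client is expected to use
the new set_freq_limit() interface added below. The touch-boost hook shown
here is hypothetical, and DVFS_TOUCH_ID, MAX_TOUCH_LIMIT and the
set_freq_limit() declaration are assumed to come from this tree's
<linux/cpufreq.h>, as they are used elsewhere in this patch:

    /* hypothetical caller, e.g. a touch driver boosting on finger-down */
    #include <linux/cpufreq.h>  /* DVFS_TOUCH_ID, MAX_TOUCH_LIMIT, set_freq_limit() */

    static void touch_boost(int pressed)
    {
        if (pressed)
            /* pin the scaling minimum at the touch-boost frequency */
            set_freq_limit(DVFS_TOUCH_ID, MAX_TOUCH_LIMIT);
        else
            /* a freq of -1 drops the limit owned by this id again */
            set_freq_limit(DVFS_TOUCH_ID, -1);
    }

Userspace reaches the same mechanism through /sys/power/cpufreq_min_limit and
/sys/power/cpufreq_max_limit: write a frequency listed in
/sys/power/cpufreq_table to request a floor or ceiling, or -1 to drop the
request.
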
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 596bf9f..6d696a9 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -13,13 +13,11 @@
 #include <linux/string.h>
 #include <linux/resume-trace.h>
 #include <linux/workqueue.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/hrtimer.h>
 
 #include "power.h"
-
-#define MAX_BUF 100
+#ifdef CONFIG_SEC_DVFS
+#include <linux/cpufreq.h>
+#endif
 
 DEFINE_MUTEX(pm_mutex);
 
@@ -29,13 +27,6 @@
 
 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
 
-static void touch_event_fn(struct work_struct *work);
-static DECLARE_WORK(touch_event_struct, touch_event_fn);
-
-static struct hrtimer tc_ev_timer;
-static int tc_ev_processed;
-static ktime_t touch_evt_timer_val;
-
 int register_pm_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&pm_chain_head, nb);
@@ -50,9 +41,8 @@
 
 int pm_notifier_call_chain(unsigned long val)
 {
-	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
-
-	return notifier_to_errno(ret);
+	return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+			== NOTIFY_BAD) ? -EINVAL : 0;
 }
 
 /* If set, devices may be suspended and resumed asynchronously. */
@@ -81,81 +71,6 @@
 
 power_attr(pm_async);
 
-static ssize_t
-touch_event_show(struct kobject *kobj,
-		 struct kobj_attribute *attr, char *buf)
-{
-	if (tc_ev_processed == 0)
-		return snprintf(buf, strnlen("touch_event", MAX_BUF) + 1,
-				"touch_event");
-	else
-		return snprintf(buf, strnlen("null", MAX_BUF) + 1,
-				"null");
-}
-
-static ssize_t
-touch_event_store(struct kobject *kobj,
-		  struct kobj_attribute *attr,
-		  const char *buf, size_t n)
-{
-
-	hrtimer_cancel(&tc_ev_timer);
-	tc_ev_processed = 0;
-
-	/* set a timer to notify the userspace to stop processing
-	 * touch event
-	 */
-	hrtimer_start(&tc_ev_timer, touch_evt_timer_val, HRTIMER_MODE_REL);
-
-	/* wakeup the userspace poll */
-	sysfs_notify(kobj, NULL, "touch_event");
-
-	return n;
-}
-
-power_attr(touch_event);
-
-static ssize_t
-touch_event_timer_show(struct kobject *kobj,
-		 struct kobj_attribute *attr, char *buf)
-{
-	return snprintf(buf, MAX_BUF, "%lld", touch_evt_timer_val.tv64);
-}
-
-static ssize_t
-touch_event_timer_store(struct kobject *kobj,
-			struct kobj_attribute *attr,
-			const char *buf, size_t n)
-{
-	unsigned long val;
-
-	if (strict_strtoul(buf, 10, &val))
-		return -EINVAL;
-
-	touch_evt_timer_val = ktime_set(0, val*1000);
-
-	return n;
-}
-
-power_attr(touch_event_timer);
-
-static void touch_event_fn(struct work_struct *work)
-{
-	/* wakeup the userspace poll */
-	tc_ev_processed = 1;
-	sysfs_notify(power_kobj, NULL, "touch_event");
-
-	return;
-}
-
-static enum hrtimer_restart tc_ev_stop(struct hrtimer *hrtimer)
-{
-
-	schedule_work(&touch_event_struct);
-
-	return HRTIMER_NORESTART;
-}
-
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
@@ -201,7 +116,7 @@
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	lock_system_sleep();
+	mutex_lock(&pm_mutex);
 
 	level = TEST_FIRST;
 	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -211,7 +126,7 @@
 			break;
 		}
 
-	unlock_system_sleep();
+	mutex_unlock(&pm_mutex);
 
 	return error ? error : n;
 }
@@ -219,105 +134,6 @@
 power_attr(pm_test);
 #endif /* CONFIG_PM_DEBUG */
 
-#ifdef CONFIG_DEBUG_FS
-static char *suspend_step_name(enum suspend_stat_step step)
-{
-	switch (step) {
-	case SUSPEND_FREEZE:
-		return "freeze";
-	case SUSPEND_PREPARE:
-		return "prepare";
-	case SUSPEND_SUSPEND:
-		return "suspend";
-	case SUSPEND_SUSPEND_NOIRQ:
-		return "suspend_noirq";
-	case SUSPEND_RESUME_NOIRQ:
-		return "resume_noirq";
-	case SUSPEND_RESUME:
-		return "resume";
-	default:
-		return "";
-	}
-}
-
-static int suspend_stats_show(struct seq_file *s, void *unused)
-{
-	int i, index, last_dev, last_errno, last_step;
-
-	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
-	last_dev %= REC_FAILED_NUM;
-	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
-	last_errno %= REC_FAILED_NUM;
-	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
-	last_step %= REC_FAILED_NUM;
-	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
-			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
-			"success", suspend_stats.success,
-			"fail", suspend_stats.fail,
-			"failed_freeze", suspend_stats.failed_freeze,
-			"failed_prepare", suspend_stats.failed_prepare,
-			"failed_suspend", suspend_stats.failed_suspend,
-			"failed_suspend_late",
-				suspend_stats.failed_suspend_late,
-			"failed_suspend_noirq",
-				suspend_stats.failed_suspend_noirq,
-			"failed_resume", suspend_stats.failed_resume,
-			"failed_resume_early",
-				suspend_stats.failed_resume_early,
-			"failed_resume_noirq",
-				suspend_stats.failed_resume_noirq);
-	seq_printf(s,	"failures:\n  last_failed_dev:\t%-s\n",
-			suspend_stats.failed_devs[last_dev]);
-	for (i = 1; i < REC_FAILED_NUM; i++) {
-		index = last_dev + REC_FAILED_NUM - i;
-		index %= REC_FAILED_NUM;
-		seq_printf(s, "\t\t\t%-s\n",
-			suspend_stats.failed_devs[index]);
-	}
-	seq_printf(s,	"  last_failed_errno:\t%-d\n",
-			suspend_stats.errno[last_errno]);
-	for (i = 1; i < REC_FAILED_NUM; i++) {
-		index = last_errno + REC_FAILED_NUM - i;
-		index %= REC_FAILED_NUM;
-		seq_printf(s, "\t\t\t%-d\n",
-			suspend_stats.errno[index]);
-	}
-	seq_printf(s,	"  last_failed_step:\t%-s\n",
-			suspend_step_name(
-				suspend_stats.failed_steps[last_step]));
-	for (i = 1; i < REC_FAILED_NUM; i++) {
-		index = last_step + REC_FAILED_NUM - i;
-		index %= REC_FAILED_NUM;
-		seq_printf(s, "\t\t\t%-s\n",
-			suspend_step_name(
-				suspend_stats.failed_steps[index]));
-	}
-
-	return 0;
-}
-
-static int suspend_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, suspend_stats_show, NULL);
-}
-
-static const struct file_operations suspend_stats_operations = {
-	.open           = suspend_stats_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = single_release,
-};
-
-static int __init pm_debugfs_init(void)
-{
-	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
-			NULL, NULL, &suspend_stats_operations);
-	return 0;
-}
-
-late_initcall(pm_debugfs_init);
-#endif /* CONFIG_DEBUG_FS */
-
 #endif /* CONFIG_PM_SLEEP */
 
 struct kobject *power_kobj;
@@ -375,23 +191,23 @@
 	/* First, check if we are requested to hibernate */
 	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
 		goto Exit;
 	}
 
 #ifdef CONFIG_SUSPEND
 	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
-		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
-#ifdef CONFIG_EARLYSUSPEND
-			if (state == PM_SUSPEND_ON || valid_state(state)) {
-				error = 0;
-				request_suspend_state(state);
-				break;
-			}
-#else
-			error = pm_suspend(state);
-#endif
-		}
+		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
+			break;
 	}
+	if (state < PM_SUSPEND_MAX && *s)
+#ifdef CONFIG_EARLYSUSPEND
+		if (state == PM_SUSPEND_ON || valid_state(state)) {
+			error = 0;
+			request_suspend_state(state);
+		}
+#else
+		error = enter_state(state);
+#endif
 #endif
 
  Exit:
@@ -501,7 +317,216 @@
 power_attr(wake_unlock);
 #endif
 
-static struct attribute *g[] = {
+#ifdef CONFIG_SEC_DVFS
+DEFINE_MUTEX(dvfs_mutex);
+static unsigned long dvfs_id;
+static unsigned long apps_min_freq = MIN_FREQ_LIMIT;
+static unsigned long apps_max_freq = MAX_FREQ_LIMIT;
+static unsigned long thermald_max_freq = MAX_FREQ_LIMIT;
+
+static unsigned long mhl_min_freq = MIN_FREQ_LIMIT;
+static unsigned long touch_min_freq = MAX_TOUCH_LIMIT;
+static unsigned long unicpu_max_freq = MAX_UNICPU_LIMIT;
+
+static int verify_cpufreq_target(unsigned int target)
+{
+	int i;
+	struct cpufreq_frequency_table *table;
+
+	table = cpufreq_frequency_get_table(BOOT_CPU);
+	if (table == NULL)
+		return -EFAULT;
+
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		if (table[i].frequency < MIN_FREQ_LIMIT ||
+		    table[i].frequency > MAX_FREQ_LIMIT)
+			continue;
+		if (target == table[i].frequency)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+int set_freq_limit(unsigned long id, unsigned int freq)
+{
+	unsigned int min = MIN_FREQ_LIMIT;
+	unsigned int max = MAX_FREQ_LIMIT;
+	unsigned int cur = 0;
+
+	if (!id)
+		return -EINVAL;
+
+	if (freq != 0 && freq != -1 && verify_cpufreq_target(freq))
+		return -EINVAL;
+
+	mutex_lock(&dvfs_mutex);
+
+	if (freq == -1)
+		dvfs_id &= ~id;
+	else
+		dvfs_id |= id;
+
+	/* update freq for apps/thermald */
+	if (id == DVFS_APPS_MIN_ID)
+		apps_min_freq = freq;
+	else if (id == DVFS_MHL_ID)
+		mhl_min_freq = freq;
+	else if (id == DVFS_APPS_MAX_ID)
+		apps_max_freq = freq;
+	else if (id == DVFS_THERMALD_ID)
+		thermald_max_freq = freq;
+
+	/* set min - apps */
+	if (dvfs_id & DVFS_APPS_MIN_ID && min < apps_min_freq)
+		min = apps_min_freq;
+	if (dvfs_id & DVFS_MHL_ID && min < mhl_min_freq)
+		min = mhl_min_freq;
+	if (dvfs_id & DVFS_TOUCH_ID && min < touch_min_freq)
+		min = touch_min_freq;
+
+	/* set max */
+	if (dvfs_id & DVFS_APPS_MAX_ID && max > apps_max_freq)
+		max = apps_max_freq;
+	if (dvfs_id & DVFS_THERMALD_ID && max > thermald_max_freq)
+		max = thermald_max_freq;
+	if (dvfs_id & DVFS_UNICPU_ID && max > unicpu_max_freq)
+		max = unicpu_max_freq;
+
+	/* check min and max */
+	if (min > max)
+		min = max;
+
+	/* update */
+	set_min_lock(min);
+	set_max_lock(max);
+#ifndef CONFIG_USA_MODEL_SGH_T989
+	pr_info("%s: 0x%lx %u, min %u, max %u\n", __func__, id, freq, min, max);
+#endif
+
+	/* for ids flagged in UPDATE_NOW_BITS, clamp the current frequency
+	 * of each online CPU into [min, max] immediately */
+	if (id & UPDATE_NOW_BITS) {
+		int cpu;
+
+		for_each_online_cpu(cpu) {
+			cur = cpufreq_quick_get(cpu);
+			if (cur) {
+				struct cpufreq_policy policy;
+				policy.cpu = cpu;
+
+				if (cur < min)
+					cpufreq_driver_target(&policy, min, CPUFREQ_RELATION_H);
+				else if (cur > max)
+					cpufreq_driver_target(&policy, max, CPUFREQ_RELATION_L);
+			}
+		}
+	}
+
+	mutex_unlock(&dvfs_mutex);
+
+	return 0;
+}
+
+static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int freq;
+
+	freq = get_min_lock();
+	if (!freq)
+		freq = -1;
+
+	return sprintf(buf, "%d\n", freq);
+}
+
+static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t n)
+{
+	int freq_min_limit;
+
+	if (sscanf(buf, "%d", &freq_min_limit) != 1)
+		return -EINVAL;
+	set_freq_limit(DVFS_APPS_MIN_ID, freq_min_limit);
+#ifdef CONFIG_SEC_DVFS_DUAL
+	if (freq_min_limit == MAX_FREQ_LIMIT)
+		dual_boost(1);
+	else
+		dual_boost(0);
+#endif
+	return n;
+}
+
+static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int freq;
+
+	freq = get_max_lock();
+	if (!freq)
+		freq = -1;
+
+	return sprintf(buf, "%d\n", freq);
+}
+
+static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t n)
+{
+	int freq_max_limit;
+
+	if (sscanf(buf, "%d", &freq_max_limit) != 1)
+		return -EINVAL;
+	set_freq_limit(DVFS_APPS_MAX_ID, freq_max_limit);
+
+	return n;
+}
+static ssize_t cpufreq_table_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	int i, count;
+	unsigned int freq;
+
+	struct cpufreq_frequency_table *table;
+
+	table = cpufreq_frequency_get_table(BOOT_CPU);
+	if (table == NULL)
+		return 0;
+
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) ;
+	count = i;
+
+	for (i = count - 1; i >= 0; i--) {
+		freq = table[i].frequency;
+
+		if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
+			continue;
+
+		len += sprintf(buf + len, "%u ", freq);
+	}
+
+	if (len)
+		buf[len - 1] = '\n';	/* replace the trailing space */
+
+	return len;
+}
+
+static ssize_t cpufreq_table_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t n)
+{
+	pr_info("%s: Not supported\n", __func__);
+	return n;
+}
+
+power_attr(cpufreq_max_limit);
+power_attr(cpufreq_min_limit);
+power_attr(cpufreq_table);
+#endif
+
+static struct attribute *g[] = {
 	&state_attr.attr,
 #ifdef CONFIG_PM_TRACE
 	&pm_trace_attr.attr,
@@ -510,8 +535,6 @@
 #ifdef CONFIG_PM_SLEEP
 	&pm_async_attr.attr,
 	&wakeup_count_attr.attr,
-	&touch_event_attr.attr,
-	&touch_event_timer_attr.attr,
 #ifdef CONFIG_PM_DEBUG
 	&pm_test_attr.attr,
 #endif
@@ -520,6 +543,11 @@
 	&wake_unlock_attr.attr,
 #endif
 #endif
+#ifdef CONFIG_SEC_DVFS
+	&cpufreq_min_limit_attr.attr,
+	&cpufreq_max_limit_attr.attr,
+	&cpufreq_table_attr.attr,
+#endif
 	NULL,
 };
 
@@ -548,13 +576,6 @@
 		return error;
 	hibernate_image_size_init();
 	hibernate_reserved_size_init();
-
-	touch_evt_timer_val = ktime_set(2, 0);
-	hrtimer_init(&tc_ev_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	tc_ev_timer.function = &tc_ev_stop;
-	tc_ev_processed = 1;
-
-
 	power_kobj = kobject_create_and_add("power", NULL);
 	if (!power_kobj)
 		return -ENOMEM;