Revert "port 3.0 kernel power source in pieces"
This reverts commit 17b94bef87f4797675ce9716725c70d4828a3db8.
Revert "workqueues: Introduce new flag WQ_POWER_EFFICIENT for power oriented workqueues"
This reverts commit b2f60dfc8a5f68d24936f9cead661b8f7856567d.
Change-Id: Ic2487f4ccbf47cde71bb96385e76468a44dfca8b
Revert "workqueue: Add system wide power_efficient workqueues"
This reverts commit 157ecab851fd1f788fd6d3c7d76dc30dbc6278aa.
Revert "workqueues: add missing header file"
This reverts commit 7f490b21c8edbd5f47320015c05c3086909d51a4.
Change-Id: I0aa3067a2a8414bcfc1035ca1433e97b9cbbdedd
Revert "block: queue work on power efficient wq"
This reverts commit b65d8b7d030437bf982ed1bfcd619d7f427fc9b4.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d66ef43..b524935 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2973,22 +2973,6 @@
or other driver-specific files in the
Documentation/watchdog/ directory.
-
- workqueue.power_efficient
- Per-cpu workqueues are generally preferred because
- they show better performance thanks to cache
- locality; unfortunately, per-cpu workqueues tend to
- be more power hungry than unbound workqueues.
-
- Enabling this makes the per-cpu workqueues which
- were observed to contribute significantly to power
- consumption unbound, leading to measurably lower
- power usage at the cost of small performance
- overhead.
-
- The default value of this parameter is determined by
- the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
-
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
supporting x2apic.
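
For reference, the documentation removed above describes a boolean module
parameter, so before this revert it could be toggled from the kernel command
line; the following usage is illustrative and not part of this patch:

    workqueue.power_efficient=1    (force WQ_POWER_EFFICIENT workqueues unbound)
    workqueue.power_efficient=0    (keep them per-cpu regardless of the Kconfig default)
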
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d0ceaf4..af15545 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -258,33 +258,6 @@
WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
- /*
- * Per-cpu workqueues are generally preferred because they tend to
- * show better performance thanks to cache locality. Per-cpu
- * workqueues exclude the scheduler from choosing the CPU to
- * execute the worker threads, which has an unfortunate side effect
- * of increasing power consumption.
- *
- * The scheduler considers a CPU idle if it doesn't have any task
- * to execute and tries to keep idle cores idle to conserve power;
- * however, for example, a per-cpu work item scheduled from an
- * interrupt handler on an idle CPU will force the scheduler to
- * execute the work item on that CPU breaking the idleness, which in
- * turn may lead to more scheduling choices which are sub-optimal
- * in terms of power consumption.
- *
- * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
- * but become unbound if workqueue.power_efficient kernel param is
- * specified. Per-cpu workqueues which are identified as
- * contributing significantly to power consumption are marked with
- * this flag, and enabling the power_efficient mode leads to a
- * noticeable power saving at the cost of a small performance
- * disadvantage.
- *
- * http://thread.gmane.org/gmane.linux.kernel/1480396
- */
- WQ_POWER_EFFICIENT = 1 << 8,
-
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
@@ -319,12 +292,6 @@
*
* system_nrt_freezable_wq is equivalent to system_nrt_wq except that
* it's freezable.
- *
- * *_power_efficient_wq are inclined towards saving power and converted
- * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
- * they are same as their non-power-efficient counterparts - e.g.
- * system_power_efficient_wq is identical to system_wq if
- * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
*/
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
@@ -332,9 +299,7 @@
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;
-extern struct workqueue_struct *system_power_efficient_wq;
-extern struct workqueue_struct *system_freezable_power_efficient_wq;
-
+
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
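
The hunks above delete both the WQ_POWER_EFFICIENT flag and the two shared
workqueues built on it. As a sketch of the interface being removed (not code
from this patch; my_wq, my_work and my_work_fn are hypothetical names):

#include <linux/init.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* hypothetical deferred work */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	struct workqueue_struct *my_wq;

	/* Per-cpu by default; unbound when workqueue.power_efficient is set. */
	my_wq = alloc_workqueue("my_wq", WQ_POWER_EFFICIENT, 0);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

Light users could instead have queued on the shared
system_power_efficient_wq, which the hunk above also removes; after the
revert, callers fall back to system_wq or their own per-cpu workqueues.
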
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4a97f3b..4e059d5 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -311,27 +311,6 @@
bool
depends on SUSPEND || CPU_IDLE
-
-config WQ_POWER_EFFICIENT_DEFAULT
- bool "Enable workqueue power-efficient mode by default"
- depends on PM
- default n
- help
- Per-cpu workqueues are generally preferred because they show
- better performance thanks to cache locality; unfortunately,
- per-cpu workqueues tend to be more power hungry than unbound
- workqueues.
-
- Enabling workqueue.power_efficient kernel parameter makes the
- per-cpu workqueues which were observed to contribute
- significantly to power consumption unbound, leading to measurably
- lower power usage at the cost of small performance overhead.
-
- This config option determines whether workqueue.power_efficient
- is enabled by default.
-
- If in doubt, say N.
-
config SUSPEND_TIME
bool "Log time spent in suspend"
---help---
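
The Kconfig option removed here only seeded the parameter's default, so a
build that wanted the power-efficient behaviour out of the box would have
carried an entry like this (illustrative defconfig fragment, not from this
patch):

    CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
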
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6d696a9..596bf9f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
- *
+ *
* This file is released under the GPLv2
*
*/
@@ -13,11 +13,13 @@
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/hrtimer.h>
#include "power.h"
-#ifdef CONFIG_SEC_DVFS
-#include <linux/cpufreq.h>
-#endif
+
+#define MAX_BUF 100
DEFINE_MUTEX(pm_mutex);
@@ -27,6 +29,13 @@
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+static void touch_event_fn(struct work_struct *work);
+static DECLARE_WORK(touch_event_struct, touch_event_fn);
+
+static struct hrtimer tc_ev_timer;
+static int tc_ev_processed;
+static ktime_t touch_evt_timer_val;
+
int register_pm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&pm_chain_head, nb);
@@ -41,8 +50,9 @@
int pm_notifier_call_chain(unsigned long val)
{
- return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+
+ return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
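
The pm_notifier_call_chain() change above stops collapsing every failure into
-EINVAL: notifier_to_errno() recovers whatever errno a callback packed with
notifier_from_errno(). A hedged sketch of a notifier relying on that
(my_device_busy() is a hypothetical check, not from this patch):

#include <linux/notifier.h>
#include <linux/suspend.h>

static int my_pm_notifier(struct notifier_block *nb,
			  unsigned long event, void *unused)
{
	if (event == PM_SUSPEND_PREPARE && my_device_busy())
		return notifier_from_errno(-EBUSY); /* caller now sees -EBUSY */
	return NOTIFY_OK; /* no objection to the transition */
}
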
@@ -71,6 +81,81 @@
power_attr(pm_async);
+static ssize_t
+touch_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ if (tc_ev_processed == 0)
+ return snprintf(buf, strnlen("touch_event", MAX_BUF) + 1,
+ "touch_event");
+ else
+ return snprintf(buf, strnlen("null", MAX_BUF) + 1,
+ "null");
+}
+
+static ssize_t
+touch_event_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+
+ hrtimer_cancel(&tc_ev_timer);
+ tc_ev_processed = 0;
+
+ /* set a timer to notify the userspace to stop processing
+ * touch event
+ */
+ hrtimer_start(&tc_ev_timer, touch_evt_timer_val, HRTIMER_MODE_REL);
+
+ /* wakeup the userspace poll */
+ sysfs_notify(kobj, NULL, "touch_event");
+
+ return n;
+}
+
+power_attr(touch_event);
+
+static ssize_t
+touch_event_timer_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, MAX_BUF, "%lld", touch_evt_timer_val.tv64);
+}
+
+static ssize_t
+touch_event_timer_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ touch_evt_timer_val = ktime_set(0, val*1000);
+
+ return n;
+}
+
+power_attr(touch_event_timer);
+
+static void touch_event_fn(struct work_struct *work)
+{
+ /* wakeup the userspace poll */
+ tc_ev_processed = 1;
+ sysfs_notify(power_kobj, NULL, "touch_event");
+
+ return;
+}
+
+static enum hrtimer_restart tc_ev_stop(struct hrtimer *hrtimer)
+{
+
+ schedule_work(&touch_event_struct);
+
+ return HRTIMER_NORESTART;
+}
+
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
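
The touch_event attribute added in this hunk signals userspace through
sysfs_notify(), which wakes sysfs pollers with POLLPRI/POLLERR. A minimal
userspace sketch of the consumer side (assumed usage, not part of this patch):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	struct pollfd pfd;
	ssize_t n;
	int fd = open("/sys/power/touch_event", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* initial read arms the poll */
	for (;;) {
		pfd.fd = fd;
		pfd.events = POLLPRI | POLLERR;
		if (poll(&pfd, 1, -1) <= 0)
			continue;
		lseek(fd, 0, SEEK_SET);		/* sysfs: re-read from offset 0 */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("state: %s\n", buf);	/* "touch_event" or "null" */
		}
	}
}

Per touch_event_show() above, the attribute reads "touch_event" while an
event is pending and "null" once the timer work has marked it processed.
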
@@ -116,7 +201,7 @@
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +211,7 @@
break;
}
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error ? error : n;
}
@@ -134,6 +219,105 @@
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
+#ifdef CONFIG_DEBUG_FS
+static char *suspend_step_name(enum suspend_stat_step step)
+{
+ switch (step) {
+ case SUSPEND_FREEZE:
+ return "freeze";
+ case SUSPEND_PREPARE:
+ return "prepare";
+ case SUSPEND_SUSPEND:
+ return "suspend";
+ case SUSPEND_SUSPEND_NOIRQ:
+ return "suspend_noirq";
+ case SUSPEND_RESUME_NOIRQ:
+ return "resume_noirq";
+ case SUSPEND_RESUME:
+ return "resume";
+ default:
+ return "";
+ }
+}
+
+static int suspend_stats_show(struct seq_file *s, void *unused)
+{
+ int i, index, last_dev, last_errno, last_step;
+
+ last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+ last_dev %= REC_FAILED_NUM;
+ last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
+ last_errno %= REC_FAILED_NUM;
+ last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
+ last_step %= REC_FAILED_NUM;
+ seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
+ "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
+ "success", suspend_stats.success,
+ "fail", suspend_stats.fail,
+ "failed_freeze", suspend_stats.failed_freeze,
+ "failed_prepare", suspend_stats.failed_prepare,
+ "failed_suspend", suspend_stats.failed_suspend,
+ "failed_suspend_late",
+ suspend_stats.failed_suspend_late,
+ "failed_suspend_noirq",
+ suspend_stats.failed_suspend_noirq,
+ "failed_resume", suspend_stats.failed_resume,
+ "failed_resume_early",
+ suspend_stats.failed_resume_early,
+ "failed_resume_noirq",
+ suspend_stats.failed_resume_noirq);
+ seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
+ suspend_stats.failed_devs[last_dev]);
+ for (i = 1; i < REC_FAILED_NUM; i++) {
+ index = last_dev + REC_FAILED_NUM - i;
+ index %= REC_FAILED_NUM;
+ seq_printf(s, "\t\t\t%-s\n",
+ suspend_stats.failed_devs[index]);
+ }
+ seq_printf(s, " last_failed_errno:\t%-d\n",
+ suspend_stats.errno[last_errno]);
+ for (i = 1; i < REC_FAILED_NUM; i++) {
+ index = last_errno + REC_FAILED_NUM - i;
+ index %= REC_FAILED_NUM;
+ seq_printf(s, "\t\t\t%-d\n",
+ suspend_stats.errno[index]);
+ }
+ seq_printf(s, " last_failed_step:\t%-s\n",
+ suspend_step_name(
+ suspend_stats.failed_steps[last_step]));
+ for (i = 1; i < REC_FAILED_NUM; i++) {
+ index = last_step + REC_FAILED_NUM - i;
+ index %= REC_FAILED_NUM;
+ seq_printf(s, "\t\t\t%-s\n",
+ suspend_step_name(
+ suspend_stats.failed_steps[index]));
+ }
+
+ return 0;
+}
+
+static int suspend_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_stats_show, NULL);
+}
+
+static const struct file_operations suspend_stats_operations = {
+ .open = suspend_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init pm_debugfs_init(void)
+{
+ debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
+ NULL, NULL, &suspend_stats_operations);
+ return 0;
+}
+
+late_initcall(pm_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
#endif /* CONFIG_PM_SLEEP */
struct kobject *power_kobj;
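
Once this lands, the counters are visible wherever debugfs is mounted;
illustrative usage:

    mount -t debugfs none /sys/kernel/debug    (if not already mounted)
    cat /sys/kernel/debug/suspend_stats

The output lists the success/fail counters and the last REC_FAILED_NUM
failing devices, errnos and suspend steps, exactly as assembled in
suspend_stats_show() above.
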
@@ -145,7 +329,7 @@
* 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
* 'disk' (Suspend-to-Disk).
*
- * store() accepts one of those strings, translates it into the
+ * store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -191,23 +375,23 @@
/* First, check if we are requested to hibernate */
if (len == 4 && !strncmp(buf, "disk", len)) {
error = hibernate();
- goto Exit;
+ goto Exit;
}
#ifdef CONFIG_SUSPEND
for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
- if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
- break;
- }
- if (state < PM_SUSPEND_MAX && *s)
+ if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
#ifdef CONFIG_EARLYSUSPEND
- if (state == PM_SUSPEND_ON || valid_state(state)) {
- error = 0;
- request_suspend_state(state);
- }
+ if (state == PM_SUSPEND_ON || valid_state(state)) {
+ error = 0;
+ request_suspend_state(state);
+ break;
+ }
#else
- error = enter_state(state);
+ error = pm_suspend(state);
#endif
+ }
+ }
#endif
Exit:
@@ -317,216 +501,7 @@
power_attr(wake_unlock);
#endif
-#ifdef CONFIG_SEC_DVFS
-DEFINE_MUTEX(dvfs_mutex);
-static unsigned long dvfs_id = 0;
-static unsigned long apps_min_freq = MIN_FREQ_LIMIT;
-static unsigned long apps_max_freq = MAX_FREQ_LIMIT;
-static unsigned long thermald_max_freq = MAX_FREQ_LIMIT;
-
-static unsigned long mhl_min_freq = MIN_FREQ_LIMIT;
-static unsigned long touch_min_freq = MAX_TOUCH_LIMIT;
-static unsigned long unicpu_max_freq = MAX_UNICPU_LIMIT;
-
-static int verify_cpufreq_target(unsigned int target)
-{
- int i;
- struct cpufreq_frequency_table *table;
-
- table = cpufreq_frequency_get_table(BOOT_CPU);
- if (table == NULL)
- return -EFAULT;
-
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- if (table[i].frequency < MIN_FREQ_LIMIT || table[i].frequency > MAX_FREQ_LIMIT)
- continue;
-
- if (target == table[i].frequency)
- return 0;
- }
-
- return -EINVAL;
-}
-
-int set_freq_limit(unsigned long id, unsigned int freq)
-{
- unsigned int min = MIN_FREQ_LIMIT;
- unsigned int max = MAX_FREQ_LIMIT;
- unsigned int cur = 0;
-
- if (id < 0)
- return -EINVAL;
-
- if (freq != 0 && freq != -1 && verify_cpufreq_target(freq))
- return -EINVAL;
-
- mutex_lock(&dvfs_mutex);
-
- if (freq == -1)
- dvfs_id &= ~id;
- else
- dvfs_id |= id;
-
- /* update freq for apps/thermald */
- if (id == DVFS_APPS_MIN_ID)
- apps_min_freq = freq;
- else if (id == DVFS_MHL_ID)
- mhl_min_freq = freq;
- else if (id == DVFS_APPS_MAX_ID)
- apps_max_freq = freq;
- else if (id == DVFS_THERMALD_ID)
- thermald_max_freq = freq;
-
- /* set min - apps */
- if (dvfs_id & DVFS_APPS_MIN_ID && min < apps_min_freq)
- min = apps_min_freq;
- if (dvfs_id & DVFS_MHL_ID && min < mhl_min_freq)
- min = mhl_min_freq;
- if (dvfs_id & DVFS_TOUCH_ID && min < touch_min_freq)
- min = touch_min_freq;
-
- /* set max */
- if (dvfs_id & DVFS_APPS_MAX_ID && max > apps_max_freq)
- max = apps_max_freq;
- if (dvfs_id & DVFS_THERMALD_ID && max > thermald_max_freq)
- max = thermald_max_freq;
- if (dvfs_id & DVFS_UNICPU_ID && max > unicpu_max_freq)
- max = unicpu_max_freq;
-
- /* check min max*/
- if (min > max)
- min = max;
-
- /* update */
- set_min_lock(min);
- set_max_lock(max);
- #ifndef CONFIG_USA_MODEL_SGH_T989
- printk("%s: 0x%lx %d, min %d, max %d\n", __FUNCTION__, id, freq, min, max);
- #endif
-
- /* need to update now */
- if (id & UPDATE_NOW_BITS)
- {
- int cpu;
-
- for_each_online_cpu(cpu) {
- cur = cpufreq_quick_get(cpu);
- if (cur) {
- struct cpufreq_policy policy;
- policy.cpu = cpu;
-
- if (cur < min)
- cpufreq_driver_target(&policy, min, CPUFREQ_RELATION_H);
- else if (cur > max)
- cpufreq_driver_target(&policy, max, CPUFREQ_RELATION_L);
- }
- }
- }
-
- mutex_unlock(&dvfs_mutex);
-
- return 0;
-}
-
-static ssize_t cpufreq_min_limit_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int freq;
-
- freq = get_min_lock();
- if (!freq)
- freq = -1;
-
- return sprintf(buf, "%d\n", freq);
-}
-
-static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- int freq_min_limit;
-
- sscanf(buf, "%d", &freq_min_limit);
-
- set_freq_limit(DVFS_APPS_MIN_ID, freq_min_limit);
-#ifdef CONFIG_SEC_DVFS_DUAL
- if (freq_min_limit == MAX_FREQ_LIMIT)
- dual_boost(1);
- else
- dual_boost(0);
-#endif
- return n;
-}
-
-static ssize_t cpufreq_max_limit_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int freq;
-
- freq = get_max_lock();
- if (!freq)
- freq = -1;
-
- return sprintf(buf, "%d\n", freq);
-}
-
-static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- int freq_max_limit;
-
- sscanf(buf, "%d", &freq_max_limit);
-
- set_freq_limit(DVFS_APPS_MAX_ID, freq_max_limit);
-
- return n;
-}
-static ssize_t cpufreq_table_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- ssize_t len = 0;
- int i, count;
- unsigned int freq;
-
- struct cpufreq_frequency_table *table;
-
- table = cpufreq_frequency_get_table(BOOT_CPU);
- if (table == NULL)
- return 0;
-
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) ;
- count = i;
-
- for (i = count-1; i >= 0; i--) {
- freq = table[i].frequency;
-
- if (freq < MIN_FREQ_LIMIT || freq > MAX_FREQ_LIMIT)
- continue;
-
- len += sprintf(buf + len, "%u ", freq);
- }
-
- len--;
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static ssize_t cpufreq_table_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- printk("%s: Not supported\n", __func__);
- return n;
-}
-
-power_attr(cpufreq_max_limit);
-power_attr(cpufreq_min_limit);
-power_attr(cpufreq_table);
-#endif
-
-static struct attribute * g[] = {
+static struct attribute *g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
@@ -535,6 +510,8 @@
#ifdef CONFIG_PM_SLEEP
&pm_async_attr.attr,
&wakeup_count_attr.attr,
+ &touch_event_attr.attr,
+ &touch_event_timer_attr.attr,
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
@@ -543,11 +520,6 @@
&wake_unlock_attr.attr,
#endif
#endif
-#ifdef CONFIG_SEC_DVFS
- &cpufreq_min_limit_attr.attr,
- &cpufreq_max_limit_attr.attr,
- &cpufreq_table_attr.attr,
-#endif
NULL,
};
@@ -576,6 +548,13 @@
return error;
hibernate_image_size_init();
hibernate_reserved_size_init();
+
+ touch_evt_timer_val = ktime_set(2, 0);
+ hrtimer_init(&tc_ev_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tc_ev_timer.function = &tc_ev_stop;
+ tc_ev_processed = 1;
+
+
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index b477a9b..da9ec86 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -8,7 +8,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -22,8 +21,10 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
+#include <linux/rtc.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
@@ -40,15 +41,16 @@
static const struct platform_suspend_ops *suspend_ops;
/**
- * suspend_set_ops - Set the global suspend method table.
- * @ops: Pointer to ops structure.
+ * suspend_set_ops - Set the global suspend method table.
+ * @ops: Suspend operations to use.
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
suspend_ops = ops;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
+EXPORT_SYMBOL_GPL(suspend_set_ops);
bool valid_state(suspend_state_t state)
{
@@ -60,16 +62,17 @@
}
/**
- * suspend_valid_only_mem - generic memory-only valid callback
+ * suspend_valid_only_mem - Generic memory-only valid callback.
*
- * Platform drivers that implement mem suspend only and only need
- * to check for that in their .valid callback can use this instead
- * of rolling their own .valid callback.
+ * Platform drivers that implement mem suspend only and only need to check for
+ * that in their .valid() callback can use this instead of rolling their own
+ * .valid() callback.
*/
int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
+EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static int suspend_test(int level)
{
@@ -84,10 +87,11 @@
}
/**
- * suspend_prepare - Do prep work before entering low-power state.
+ * suspend_prepare - Prepare for entering system sleep state.
*
- * This is common code that is called for each state that we're entering.
- * Run suspend notifiers, allocate a console and stop all processes.
+ * Common code run for every system sleep state that can be entered (except for
+ * hibernation). Run suspend notifiers, allocate the "suspend" console and
+ * freeze processes.
*/
static int suspend_prepare(void)
{
@@ -102,14 +106,12 @@
if (error)
goto Finish;
- if (error)
- goto Finish;
-
error = suspend_freeze_processes();
if (!error)
return 0;
- suspend_thaw_processes();
+ suspend_stats.failed_freeze++;
+ dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
@@ -129,12 +131,13 @@
}
/**
- * suspend_enter - enter the desired system sleep state.
- * @state: state to enter
+ * suspend_enter - Make the system enter the given sleep state.
+ * @state: System sleep state to enter.
+ * @wakeup: Returns information that the sleep state should not be re-entered.
*
- * This function should be called after devices have been suspended.
+ * This function should be called after devices have been suspended.
*/
-static int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
@@ -168,7 +171,8 @@
error = syscore_suspend();
if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+ *wakeup = pm_wakeup_pending();
+ if (!(suspend_test(TEST_CORE) || *wakeup)) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
@@ -195,13 +199,13 @@
}
/**
- * suspend_devices_and_enter - suspend devices and enter the desired system
- * sleep state.
- * @state: state to enter
+ * suspend_devices_and_enter - Suspend devices and enter system sleep state.
+ * @state: System sleep state to enter.
*/
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
+ bool wakeup = false;
if (!suspend_ops)
return -ENOSYS;
@@ -224,7 +228,10 @@
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- error = suspend_enter(state);
+ do {
+ error = suspend_enter(state, &wakeup);
+ } while (!error && !wakeup
+ && suspend_ops->suspend_again && suspend_ops->suspend_again());
Resume_devices:
suspend_test_start();
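
The retry loop above lets a platform drop straight back into sleep without a
full resume, for as long as its suspend_again() callback returns true. A
sketch of a platform_suspend_ops using it (the my_* names are hypothetical,
not from this patch):

#include <linux/suspend.h>

static bool my_suspend_again(void)
{
	/* true: suspend_devices_and_enter() re-enters sleep immediately */
	return my_charger_irq_pending();	/* hypothetical condition */
}

static const struct platform_suspend_ops my_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.enter		= my_suspend_enter,	/* hypothetical enter hook */
	.suspend_again	= my_suspend_again,
};
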
@@ -245,10 +252,10 @@
}
/**
- * suspend_finish - Do final work before exiting suspend sequence.
+ * suspend_finish - Clean up before finishing the suspend sequence.
*
- * Call platform code to clean up, restart processes, and free the
- * console that we've allocated. This is not called for suspend-to-disk.
+ * Call platform code to clean up, restart processes, and free the console that
+ * we've allocated. This routine is not called for hibernation.
*/
static void suspend_finish(void)
{
@@ -258,16 +265,14 @@
}
/**
- * enter_state - Do common work of entering low-power state.
- * @state: pm_state structure for state we're entering.
+ * enter_state - Do common work needed to enter system sleep state.
+ * @state: System sleep state to enter.
*
- * Make sure we're the only ones trying to enter a sleep state. Fail
- * if someone has beat us to it, since we don't want anything weird to
- * happen when we wake up.
- * Then, do the setup for suspend, enter the state, and cleaup (after
- * we've woken up).
+ * Make sure that no one else is trying to put the system into a sleep state.
+ * Fail if that's not the case. Otherwise, prepare for system suspend, make the
+ * system enter the given sleep state and clean up after wakeup.
*/
-int enter_state(suspend_state_t state)
+static int enter_state(suspend_state_t state)
{
int error;
@@ -278,7 +283,6 @@
return -EBUSY;
suspend_sys_sync_queue();
-
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare();
if (error)
@@ -300,16 +304,41 @@
return error;
}
+static void pm_suspend_marker(char *annotation)
+{
+ struct timespec ts;
+ struct rtc_time tm;
+
+ getnstimeofday(&ts);
+ rtc_time_to_tm(ts.tv_sec, &tm);
+ pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
+ annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+}
+
/**
- * pm_suspend - Externally visible function for suspending system.
- * @state: Enumerated value of state to enter.
+ * pm_suspend - Externally visible function for suspending the system.
+ * @state: System sleep state to enter.
*
- * Determine whether or not value is within range, get state
- * structure, and enter (above).
+ * Check if the value of @state represents one of the supported states,
+ * execute enter_state() and update system suspend statistics.
*/
int pm_suspend(suspend_state_t state)
{
- if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX)
- return enter_state(state);
- return -EINVAL;
+ int error;
+
+ if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
+ return -EINVAL;
+
+ pm_suspend_marker("entry");
+ error = enter_state(state);
+ if (error) {
+ suspend_stats.fail++;
+ dpm_save_failed_errno(error);
+ } else {
+ suspend_stats.success++;
+ }
+ pm_suspend_marker("exit");
+ return error;
}
+EXPORT_SYMBOL(pm_suspend);
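
pm_suspend_marker() brackets every suspend cycle in the kernel log; given the
format string above, dmesg would show lines like the following (timestamps
invented for illustration):

    PM: suspend entry 2013-06-10 12:34:56.123456789 UTC
    PM: suspend exit 2013-06-10 12:35:40.987654321 UTC
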
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5592eba..1bf4b0e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -42,7 +42,6 @@
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/bug.h>
-#include <linux/moduleparam.h>
#include "workqueue_sched.h"
@@ -263,32 +262,19 @@
char name[]; /* I: workqueue name */
};
-/* see the comment above the definition of WQ_POWER_EFFICIENT */
-#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
-static bool wq_power_efficient = true;
-#else
-static bool wq_power_efficient;
-#endif
-
-module_param_named(power_efficient, wq_power_efficient, bool, 0644);
-
struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
-struct workqueue_struct *system_power_efficient_wq __read_mostly;
-struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
-EXPORT_SYMBOL_GPL(system_power_efficient_wq);
-EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
-
+
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
@@ -3030,10 +3016,6 @@
struct workqueue_struct *wq;
unsigned int cpu;
size_t namelen;
- /* see the comment above the definition of WQ_POWER_EFFICIENT */
- if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
- flags |= WQ_UNBOUND;
-
/* determine namelen, allocate wq and format name */
va_start(args, lock_name);
@@ -4005,11 +3987,6 @@
WQ_FREEZABLE, 0);
system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
- system_power_efficient_wq = alloc_workqueue("events_power_efficient",
- WQ_POWER_EFFICIENT, 0);
- system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
- WQ_FREEZABLE | WQ_POWER_EFFICIENT,
- 0);
BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
!system_unbound_wq || !system_freezable_wq ||
!system_nrt_freezable_wq);