Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e691818..6ebda1d 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -164,6 +164,14 @@
{
struct freezer *freezer;
+ if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
/*
* Anything frozen can't move or be moved to/from.
*/
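
Editorial note: the can_attach check added here (and repeated for cpuset below and for the cpu cgroup in kernel/sched.c later in this patch) enforces one rule: a task may only be moved into the cgroup by itself, by a suitably capable process (CAP_SYS_ADMIN here, CAP_SYS_NICE for the cpu cgroup), or by a process whose effective uid matches the target's real or saved uid. A minimal userspace sketch of the resulting behaviour follows; it is not part of the patch, and the freezer mount point and group name are assumptions for illustration.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Try to move another process into a (hypothetical) frozen group. */
static int move_task_to_frozen(pid_t pid)
{
	char buf[16];
	int fd, ret = 0;

	fd = open("/dev/cgroup/freezer/frozen/tasks", O_WRONLY);
	if (fd < 0)
		return -errno;

	snprintf(buf, sizeof(buf), "%d\n", (int)pid);
	if (write(fd, buf, strlen(buf)) < 0)
		ret = -errno;	/* -EPERM if the new euid/uid/suid check rejects us */

	close(fd);
	return ret;
}
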
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9c9b754..f4c101b 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1373,6 +1373,14 @@
{
struct cpuset *cs = cgroup_cs(cont);
+ if ((current != tsk) && (!capable(CAP_SYS_ADMIN))) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(tsk);
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
+
if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a7840ae..3b67d89 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -521,6 +521,32 @@
}
EXPORT_SYMBOL(irq_set_irq_wake);
+/**
+ * irq_read_line - read the value on an irq line
+ * @irq: Interrupt number representing a hardware line
+ *
+ * This function is meant to be called from within the irq handler.
+ * Slow-bus irq controllers might sleep, but it is assumed that the irq
+ * handler for slow-bus interrupts executes in thread context, so
+ * sleeping is okay.
+ */
+int irq_read_line(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int val;
+
+ if (!desc || !desc->irq_data.chip->irq_read_line)
+ return -EINVAL;
+
+ chip_bus_lock(desc);
+ raw_spin_lock(&desc->lock);
+ val = desc->irq_data.chip->irq_read_line(&desc->irq_data);
+ raw_spin_unlock(&desc->lock);
+ chip_bus_sync_unlock(desc);
+ return val;
+}
+EXPORT_SYMBOL_GPL(irq_read_line);
+
/*
* Internal function that tells the architecture code whether a
* particular irq has been exclusively allocated or is available
@@ -1400,3 +1426,16 @@
return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void irq_set_pending(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+
+ if (desc) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ desc->istate |= IRQS_PENDING;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(irq_set_pending);
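
Editorial note: a minimal sketch of how a driver on a slow bus might use the new irq_read_line() from its threaded handler, where sleeping is allowed as the kerneldoc above assumes. It presumes the matching declaration is added to the genirq headers elsewhere in this contribution; all other names are made up for illustration, not part of the patch.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>

static irqreturn_t example_irq_thread(int irq, void *dev_id)
{
	/* irq_read_line() may sleep on a slow bus; safe here in thread context */
	int level = irq_read_line(irq);

	if (level < 0)		/* no irq_read_line callback for this chip */
		return IRQ_NONE;

	pr_debug("example: irq %d line is %s\n", irq, level ? "high" : "low");
	return IRQ_HANDLED;
}

static int example_setup(unsigned int irq, void *dev)
{
	/* Threaded handler only; the line is read from process context */
	return request_threaded_irq(irq, NULL, example_irq_thread,
				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				    "example", dev);
}
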
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index f323a4c..0e5ab4d 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -50,7 +50,7 @@
struct irq_desc *desc;
int irq;
- for_each_irq_desc(irq, desc) {
+ for_each_irq_desc_reverse(irq, desc) {
unsigned long flags;
raw_spin_lock_irqsave(&desc->lock, flags);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 37f05d0..b6ff61a 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -471,9 +471,11 @@
return ret;
}
ret = register_pm_qos_misc(&network_throughput_pm_qos);
- if (ret < 0)
+ if (ret < 0) {
printk(KERN_ERR
"pm_qos_param: network_throughput setup failed\n");
+ return 0;
+ }
return ret;
}
diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c
index a3edcb2..aedf4aa 100644
--- a/kernel/power/consoleearlysuspend.c
+++ b/kernel/power/consoleearlysuspend.c
@@ -25,28 +25,23 @@
static int orig_fgconsole;
static void console_early_suspend(struct early_suspend *h)
{
- acquire_console_sem();
orig_fgconsole = fg_console;
if (vc_allocate(EARLY_SUSPEND_CONSOLE))
goto err;
if (set_console(EARLY_SUSPEND_CONSOLE))
goto err;
- release_console_sem();
if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1))
pr_warning("console_early_suspend: Can't switch VCs.\n");
return;
err:
pr_warning("console_early_suspend: Can't set console\n");
- release_console_sem();
}
static void console_late_resume(struct early_suspend *h)
{
int ret;
- acquire_console_sem();
ret = set_console(orig_fgconsole);
- release_console_sem();
if (ret) {
pr_warning("console_late_resume: Can't set console.\n");
return;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 63774df..6799c42 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -276,9 +276,7 @@
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
- printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
- printk("done.\n");
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare();
diff --git a/kernel/printk.c b/kernel/printk.c
index 4835df7..ff1a52c 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1184,6 +1184,14 @@
console_unlock();
}
+static void __cpuinit console_flush(struct work_struct *work)
+{
+ console_lock();
+ console_unlock();
+}
+
+static __cpuinitdata DECLARE_WORK(console_cpu_notify_work, console_flush);
+
/**
* console_cpu_notify - print deferred console messages after CPU hotplug
* @self: notifier struct
@@ -1194,6 +1202,9 @@
* will be spooled but will not show up on the console. This function is
* called when a new CPU comes online (or fails to come up), and ensures
* that any such output gets printed.
+ *
+ * Special handling is needed for the cases that may be invoked from an
+ * atomic context, since the console semaphore cannot be taken there.
*/
static int __cpuinit console_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -1205,6 +1216,12 @@
case CPU_UP_CANCELED:
console_lock();
console_unlock();
+ /* invoked with preemption disabled, so defer */
+ case CPU_DYING:
+ if (!console_trylock())
+ schedule_work(&console_cpu_notify_work);
+ else
+ console_unlock();
}
return NOTIFY_OK;
}
diff --git a/kernel/resource.c b/kernel/resource.c
index 3ff4017..fdd3939 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -262,6 +262,24 @@
EXPORT_SYMBOL(request_resource);
/**
+ * locate_resource - locate an already reserved I/O or memory resource
+ * @root: root resource descriptor
+ * @search: resource descriptor to be located
+ *
+ * Returns pointer to desired resource or NULL if not found.
+ */
+struct resource *locate_resource(struct resource *root, struct resource *search)
+{
+ struct resource *found;
+
+ write_lock(&resource_lock);
+ found = __request_resource(root, search);
+ write_unlock(&resource_lock);
+ return found;
+}
+EXPORT_SYMBOL(locate_resource);
+
+/**
* release_resource - release a previously reserved resource
* @old: resource pointer
*/
diff --git a/kernel/sched.c b/kernel/sched.c
index bb4035c..a13457d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4602,7 +4602,7 @@
EXPORT_SYMBOL(complete_all);
static inline long __sched
-do_wait_for_common(struct completion *x, long timeout, int state)
+do_wait_for_common(struct completion *x, long timeout, int state, int iowait)
{
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
@@ -4615,7 +4615,10 @@
}
__set_current_state(state);
spin_unlock_irq(&x->wait.lock);
- timeout = schedule_timeout(timeout);
+ if (iowait)
+ timeout = io_schedule_timeout(timeout);
+ else
+ timeout = schedule_timeout(timeout);
spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
__remove_wait_queue(&x->wait, &wait);
@@ -4627,12 +4630,12 @@
}
static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
+wait_for_common(struct completion *x, long timeout, int state, int iowait)
{
might_sleep();
spin_lock_irq(&x->wait.lock);
- timeout = do_wait_for_common(x, timeout, state);
+ timeout = do_wait_for_common(x, timeout, state, iowait);
spin_unlock_irq(&x->wait.lock);
return timeout;
}
@@ -4649,11 +4652,24 @@
*/
void __sched wait_for_completion(struct completion *x)
{
- wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wait_for_completion);
/**
+ * wait_for_completion_io: - waits for completion of a task
+ * @x: holds the state of this particular completion
+ *
+ * This waits to be signaled for completion of a specific task. Any time
+ * spent sleeping is accounted as IO wait for process accounting purposes.
+ */
+void __sched wait_for_completion_io(struct completion *x)
+{
+ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE, 1);
+}
+EXPORT_SYMBOL(wait_for_completion_io);
+
+/**
* wait_for_completion_timeout: - waits for completion of a task (w/timeout)
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
@@ -4665,7 +4681,7 @@
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
- return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
+ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
@@ -4678,7 +4694,8 @@
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
- long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+ long t =
+ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, 0);
if (t == -ERESTARTSYS)
return t;
return 0;
@@ -4697,7 +4714,7 @@
wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
- return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
+ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
@@ -4710,7 +4727,7 @@
*/
int __sched wait_for_completion_killable(struct completion *x)
{
- long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE, 0);
if (t == -ERESTARTSYS)
return t;
return 0;
@@ -4730,7 +4747,7 @@
wait_for_completion_killable_timeout(struct completion *x,
unsigned long timeout)
{
- return wait_for_common(x, timeout, TASK_KILLABLE);
+ return wait_for_common(x, timeout, TASK_KILLABLE, 0);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
@@ -5723,6 +5740,7 @@
delayacct_blkio_end();
return ret;
}
+EXPORT_SYMBOL(io_schedule_timeout);
/**
* sys_sched_get_priority_max - return maximum RT priority.
@@ -7808,6 +7826,9 @@
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus();
return NOTIFY_OK;
@@ -8971,6 +8992,15 @@
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
+ if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(tsk);
+
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
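
Editorial note: a minimal sketch of the intended use of wait_for_completion_io(): a driver sleeping on a hardware transfer and having that sleep accounted as iowait rather than plain sleep. It assumes the matching declaration is added to include/linux/completion.h elsewhere in this contribution; the transfer structure and helpers are made up for illustration.

#include <linux/completion.h>
#include <linux/sched.h>

struct example_xfer {
	struct completion done;
};

/* Called from the device's interrupt handler when the transfer finishes */
static void example_xfer_complete(struct example_xfer *xfer)
{
	complete(&xfer->done);
}

static int example_do_xfer(struct example_xfer *xfer)
{
	init_completion(&xfer->done);

	/* ... start the hardware transfer here ... */

	/* Block until the IRQ path signals us; the wait is accounted as iowait */
	wait_for_completion_io(&xfer->done);
	return 0;
}
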
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ea468b1..4c5eac4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -985,6 +985,19 @@
.proc_handler = proc_dointvec,
},
#endif
+#ifdef CONFIG_ARM
+ {
+ .procname = "boot_reason",
+ .data = &boot_reason,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+/*
+ * NOTE: do not add new entries to this table unless you have read
+ * Documentation/sysctl/ctl_unnumbered.txt
+ */
{ }
};
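
Editorial note: with the table entry above, the value becomes readable on ARM targets as /proc/sys/kernel/boot_reason (the boot_reason variable itself is provided by the ARM/MSM code elsewhere in this contribution, and its values are defined by the boot loader interface). A minimal userspace sketch, not part of the patch:

#include <stdio.h>

int main(void)
{
	int reason = -1;
	FILE *f = fopen("/proc/sys/kernel/boot_reason", "r");

	if (!f) {
		perror("boot_reason");
		return 1;
	}
	if (fscanf(f, "%d", &reason) != 1)
		reason = -1;	/* unreadable or malformed */
	fclose(f);

	printf("boot reason: %d\n", reason);
	return 0;
}
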
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 3b8e028..4406ba2 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -137,6 +137,7 @@
{ CTL_INT, KERN_COMPAT_LOG, "compat-log" },
{ CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
{ CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
+ { CTL_INT, KERN_BOOT_REASON, "boot_reason" },
{}
};
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553..d86d61f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3069,6 +3069,32 @@
EXPORT_SYMBOL_GPL(destroy_workqueue);
/**
+ * workqueue_empty - test whether a workqueue is empty
+ * @wq: target workqueue
+ *
+ * Test whether @wq's cpu workqueue(s) are empty.
+ *
+ * Returns: false - workqueue is not empty
+ * true - workqueue is empty
+ */
+bool workqueue_empty(struct workqueue_struct *wq)
+{
+ int cpu;
+
+ for_each_cwq_cpu(cpu, wq) {
+ struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+ if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(workqueue_empty);
+
+/**
* workqueue_set_max_active - adjust max_active of a workqueue
* @wq: target workqueue
* @max_active: new max_active value.
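
Editorial note: a minimal sketch of a caller of the new workqueue_empty(), e.g. a driver refusing to suspend while deferred work is still queued. It assumes the matching declaration is added to include/linux/workqueue.h elsewhere in this contribution; the workqueue and function names are made up. Note that the check covers nr_active and the delayed list only, so callers still need their own synchronization against new work being queued concurrently.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_suspend_prepare(void)
{
	/* Refuse to suspend while queued or delayed work remains on example_wq */
	if (!workqueue_empty(example_wq))
		return -EBUSY;
	return 0;
}
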