Merge remote-tracking branch 'common/android-3.0' into msm-3.0
* common/android-3.0: (570 commits)
misc: remove kernel debugger core
ARM: common: fiq_debugger: dump sysrq directly to console if enabled
ARM: common: fiq_debugger: add irq context debug functions
net: wireless: bcmdhd: Call init_ioctl() only if was started properly for WEXT
net: wireless: bcmdhd: Call init_ioctl() only if was started properly
net: wireless: bcmdhd: Fix possible memory leak in escan/iscan
cpufreq: interactive governor: default 20ms timer
cpufreq: interactive governor: go to intermediate hi speed before max
cpufreq: interactive governor: scale to max only if at min speed
cpufreq: interactive governor: apply intermediate load on current speed
ARM: idle: update idle ticks before call idle end notifier
input: gpio_input: don't print debounce message unless flag is set
net: wireless: bcm4329: Skip dhd_bus_stop() if bus is already down
net: wireless: bcmdhd: Skip dhd_bus_stop() if bus is already down
net: wireless: bcmdhd: Improve suspend/resume processing
net: wireless: bcmdhd: Check if FW is Ok for internal FW call
tcp: Don't nuke connections for the wrong protocol
ARM: common: fiq_debugger: make uart irq be no_suspend
net: wireless: Skip connect warning for CONFIG_CFG80211_ALLOW_RECONNECT
mm: avoid livelock on !__GFP_FS allocations
...
Conflicts:
arch/arm/mm/cache-l2x0.c
arch/arm/vfp/vfpmodule.c
drivers/mmc/core/host.c
kernel/power/wakelock.c
net/bluetooth/hci_event.c
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/kernel/futex.c b/kernel/futex.c
index 0a30897..11cbe05 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -218,6 +218,8 @@
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ,
+ * VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
@@ -229,12 +231,12 @@
* lock_page() might sleep, the caller should not hold a spinlock.
*/
static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
- int err;
+ int err, ro = 0;
/*
* The futex address must be "naturally" aligned.
@@ -262,8 +264,18 @@
again:
err = get_user_pages_fast(address, 1, 1, &page);
+ /*
+ * If write access is not required (e.g. FUTEX_WAIT), try to
+ * get read-only access.
+ */
+ if (err == -EFAULT && rw == VERIFY_READ) {
+ err = get_user_pages_fast(address, 1, 0, &page);
+ ro = 1;
+ }
if (err < 0)
return err;
+ else
+ err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page_head = page;
@@ -305,6 +317,13 @@
if (!page_head->mapping) {
unlock_page(page_head);
put_page(page_head);
+ /*
+ * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
+ * trying to find one. RW mapping would have COW'd (and thus
+ * have a mapping) so this page is RO and won't ever change.
+ */
+ if (page_head == ZERO_PAGE(address))
+ return -EFAULT;
goto again;
}
@@ -316,6 +335,15 @@
* the object not the particular process.
*/
if (PageAnon(page_head)) {
+ /*
+ * A RO anonymous page will never change and thus doesn't make
+ * sense for futex operations.
+ */
+ if (ro) {
+ err = -EFAULT;
+ goto out;
+ }
+
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
@@ -327,9 +355,10 @@
get_futex_key_refs(key);
+out:
unlock_page(page_head);
put_page(page_head);
- return 0;
+ return err;
}
static inline void put_futex_key(union futex_key *key)
@@ -940,7 +969,7 @@
if (!bitset)
return -EINVAL;
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
@@ -986,10 +1015,10 @@
int ret, op_ret;
retry:
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -1243,10 +1272,11 @@
pi_state = NULL;
}
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1);
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
+ requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -1790,7 +1820,7 @@
* while the syscall executes.
*/
retry:
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
if (unlikely(ret != 0))
return ret;
@@ -1941,7 +1971,7 @@
}
retry:
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
@@ -2060,7 +2090,7 @@
if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM;
- ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
+ ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
@@ -2249,7 +2279,7 @@
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2);
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
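
The get_futex_key() change above lets read-only futex operations such as FUTEX_WAIT work on mappings that are not writable, by retrying get_user_pages_fast() without write access instead of returning -EFAULT. A minimal userspace sketch of the case this enables follows; the file name, initial value and timeout are placeholders and error handling is omitted:

/* Hypothetical reader: waits on a futex word in a read-only shared mapping.
 * A writer process maps the same file read-write, updates the word and
 * issues FUTEX_WAKE. Without the change above, FUTEX_WAIT here fails with
 * -EFAULT because get_futex_key() always requested a writable page.
 */
#include <fcntl.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	int fd = open("futex_shared", O_RDONLY);   /* placeholder file, >= 4 bytes */
	uint32_t *ftx = mmap(NULL, sizeof(*ftx), PROT_READ, MAP_SHARED, fd, 0);
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	/* Wait only while the futex word is still 0 (relative 1s timeout). */
	long ret = syscall(SYS_futex, ftx, FUTEX_WAIT, 0, &ts, NULL, 0);
	printf("FUTEX_WAIT returned %ld\n", ret);
	return 0;
}
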
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 45e149c..2adc6b5 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -178,7 +178,7 @@
desc->depth = 1;
if (desc->irq_data.chip->irq_shutdown)
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
- if (desc->irq_data.chip->irq_disable)
+ else if (desc->irq_data.chip->irq_disable)
desc->irq_data.chip->irq_disable(&desc->irq_data);
else
desc->irq_data.chip->irq_mask(&desc->irq_data);
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 3a2cab4..e38544d 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -246,7 +246,7 @@
gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
for (i = gc->irq_base; msk; msk >>= 1, i++) {
- if (!msk & 0x01)
+ if (!(msk & 0x01))
continue;
if (flags & IRQ_GC_INIT_NESTED_LOCK)
@@ -301,7 +301,7 @@
raw_spin_unlock(&gc_lock);
for (; msk; msk >>= 1, i++) {
- if (!msk & 0x01)
+ if (!(msk & 0x01))
continue;
/* Remove handler first. That will mask the irq line */
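
Both generic-chip.c hunks fix the same operator-precedence bug: in C, "!msk & 0x01" parses as "(!msk) & 0x01", which is 0 for any non-zero mask, so cleared bits were never skipped. A standalone sketch (illustrative only) showing the difference:

#include <stdio.h>

int main(void)
{
	unsigned msk = 0x6;	/* bit 0 clear, bits 1 and 2 set */

	/* Buggy form: !msk == 0, so 0 & 0x01 == 0 and "continue" never runs. */
	printf("!msk & 0x01   = %d\n", !msk & 0x01);
	/* Fixed form: tests bit 0 of msk as intended. */
	printf("!(msk & 0x01) = %d\n", !(msk & 0x01));
	return 0;
}
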
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b..640ded8 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -250,7 +250,7 @@
do {
times->utime = cputime_add(times->utime, t->utime);
times->stime = cputime_add(times->stime, t->stime);
- times->sum_exec_runtime += t->se.sum_exec_runtime;
+ times->sum_exec_runtime += task_sched_runtime(t);
} while_each_thread(tsk, t);
out:
rcu_read_unlock();
@@ -274,9 +274,7 @@
struct task_cputime sum;
unsigned long flags;
- spin_lock_irqsave(&cputimer->lock, flags);
if (!cputimer->running) {
- cputimer->running = 1;
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
@@ -284,8 +282,11 @@
* it.
*/
thread_group_cputime(tsk, &sum);
+ spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 1;
update_gt_cputime(&cputimer->cputime, &sum);
- }
+ } else
+ spin_lock_irqsave(&cputimer->lock, flags);
*times = cputimer->cputime;
spin_unlock_irqrestore(&cputimer->lock, flags);
}
@@ -312,7 +313,8 @@
cpu->cpu = cputime.utime;
break;
case CPUCLOCK_SCHED:
- cpu->sched = thread_group_sched_runtime(p);
+ thread_group_cputime(p, &cputime);
+ cpu->sched = cputime.sum_exec_runtime;
break;
}
return 0;
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
index b15f02e..5a6b2fa 100644
--- a/kernel/power/earlysuspend.c
+++ b/kernel/power/earlysuspend.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rtc.h>
-#include <linux/syscalls.h> /* sys_sync */
#include <linux/wakelock.h>
#include <linux/workqueue.h>
@@ -103,10 +102,7 @@
}
mutex_unlock(&early_suspend_lock);
- if (debug_mask & DEBUG_SUSPEND)
- pr_info("early_suspend: sync\n");
-
- sys_sync();
+ suspend_sys_sync_queue();
abort:
spin_lock_irqsave(&state_lock, irqflags);
if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index b6b9006..2d57e79 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -251,6 +251,11 @@
extern struct workqueue_struct *suspend_work_queue;
extern struct wake_lock main_wake_lock;
extern suspend_state_t requested_suspend_state;
+extern void suspend_sys_sync_queue(void);
+extern int suspend_sys_sync_wait(void);
+#else
+static inline void suspend_sys_sync_queue(void) {}
+static inline int suspend_sys_sync_wait(void) { return 0; }
#endif
#ifdef CONFIG_USER_WAKELOCK
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 31338cd..6c8c925 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/wakelock.h>
+#include "power.h"
/*
* Timeout for stopping processes
@@ -158,6 +159,10 @@
goto Exit;
printk("done.\n");
+ error = suspend_sys_sync_wait();
+ if (error)
+ goto Exit;
+
printk("Freezing remaining freezable tasks ... ");
error = try_to_freeze_tasks(false);
if (error)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6799c42..9046443 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -276,7 +276,7 @@
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
- sys_sync();
+ suspend_sys_sync_queue();
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare();
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 892e3ec..23dfc40 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -52,6 +52,14 @@
struct wake_lock main_wake_lock;
suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;
static struct wake_lock unknown_wakeup;
+/* flag to warn/bug if wakelocks are taken after suspend_noirq */
+static int msm_suspend_check_done;
+static struct wake_lock suspend_backoff_lock;
+
+#define SUSPEND_BACKOFF_THRESHOLD 10
+#define SUSPEND_BACKOFF_INTERVAL 10000
+
+static unsigned suspend_short_count;
#ifdef CONFIG_WAKELOCK_STAT
static struct wake_lock deleted_wake_locks;
@@ -323,10 +331,18 @@
return 0;
}
+static void suspend_backoff(void)
+{
+ pr_info("suspend: too many immediate wakeups, back off\n");
+ wake_lock_timeout(&suspend_backoff_lock,
+ msecs_to_jiffies(SUSPEND_BACKOFF_INTERVAL));
+}
+
static void suspend(struct work_struct *work)
{
int ret;
int entry_event_num;
+ struct timespec ts_entry, ts_exit;
if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
if (debug_mask & DEBUG_SUSPEND)
@@ -335,20 +351,33 @@
}
entry_event_num = current_event_num;
- sys_sync();
+ suspend_sys_sync_queue();
if (debug_mask & DEBUG_SUSPEND)
pr_info("suspend: enter suspend\n");
+ getnstimeofday(&ts_entry);
ret = pm_suspend(requested_suspend_state);
+ getnstimeofday(&ts_exit);
+
if (debug_mask & DEBUG_EXIT_SUSPEND) {
- struct timespec ts;
struct rtc_time tm;
- getnstimeofday(&ts);
- rtc_time_to_tm(ts.tv_sec, &tm);
+ rtc_time_to_tm(ts_exit.tv_sec, &tm);
pr_info("suspend: exit suspend, ret = %d "
"(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret,
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec);
}
+
+ if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) {
+ ++suspend_short_count;
+
+ if (suspend_short_count == SUSPEND_BACKOFF_THRESHOLD) {
+ suspend_backoff();
+ suspend_short_count = 0;
+ }
+ } else {
+ suspend_short_count = 0;
+ }
+
if (current_event_num == entry_event_num) {
if (debug_mask & DEBUG_SUSPEND)
pr_info("suspend: pm_suspend returned with no event\n");
@@ -383,11 +412,21 @@
#endif
if (debug_mask & DEBUG_SUSPEND)
pr_info("power_suspend_late return %d\n", ret);
+
+ if (ret == 0)
+ msm_suspend_check_done = 1;
return ret;
}
+static int power_resume_early(struct device *dev)
+{
+ msm_suspend_check_done = 0;
+ return 0;
+}
+
static struct dev_pm_ops power_driver_pm_ops = {
.suspend_noirq = power_suspend_late,
+ .resume_noirq = power_resume_early,
};
static struct platform_driver power_driver = {
@@ -530,12 +569,24 @@
void wake_lock(struct wake_lock *lock)
{
+ /*
+ * if a wake lock is taken this late in the suspend sequence,
+ * call BUG() so we can analyze the call stack
+ */
+ BUG_ON(msm_suspend_check_done);
+
wake_lock_internal(lock, 0, 0);
}
EXPORT_SYMBOL(wake_lock);
void wake_lock_timeout(struct wake_lock *lock, long timeout)
{
+ /*
+ * if a wake lock is taken this late in the suspend sequence,
+ * call BUG() so we can analyze the call stack
+ */
+ BUG_ON(msm_suspend_check_done);
+
wake_lock_internal(lock, timeout, 1);
}
EXPORT_SYMBOL(wake_lock_timeout);
@@ -615,6 +666,8 @@
wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main");
wake_lock(&main_wake_lock);
wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups");
+ wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND,
+ "suspend_backoff");
ret = platform_device_register(&power_device);
if (ret) {
@@ -627,6 +680,14 @@
goto err_platform_driver_register;
}
+ INIT_COMPLETION(suspend_sys_sync_comp);
+ suspend_sys_sync_work_queue =
+ create_singlethread_workqueue("suspend_sys_sync");
+ if (suspend_sys_sync_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_suspend_sys_sync_work_queue;
+ }
+
suspend_work_queue = create_singlethread_workqueue("suspend");
if (suspend_work_queue == NULL) {
ret = -ENOMEM;
@@ -640,10 +701,12 @@
return 0;
err_suspend_work_queue:
+err_suspend_sys_sync_work_queue:
platform_driver_unregister(&power_driver);
err_platform_driver_register:
platform_device_unregister(&power_device);
err_platform_device_register:
+ wake_lock_destroy(&suspend_backoff_lock);
wake_lock_destroy(&unknown_wakeup);
wake_lock_destroy(&main_wake_lock);
#ifdef CONFIG_WAKELOCK_STAT
@@ -658,8 +721,10 @@
remove_proc_entry("wakelocks", NULL);
#endif
destroy_workqueue(suspend_work_queue);
+ destroy_workqueue(suspend_sys_sync_work_queue);
platform_driver_unregister(&power_driver);
platform_device_unregister(&power_device);
+ wake_lock_destroy(&suspend_backoff_lock);
wake_lock_destroy(&unknown_wakeup);
wake_lock_destroy(&main_wake_lock);
#ifdef CONFIG_WAKELOCK_STAT
diff --git a/kernel/printk.c b/kernel/printk.c
index bddd32b..b790764 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1653,7 +1653,7 @@
struct console *con;
for_each_console(con) {
- if (con->flags & CON_BOOT) {
+ if (!keep_bootcon && con->flags & CON_BOOT) {
printk(KERN_INFO "turn off boot console %s%d\n",
con->name, con->index);
unregister_console(con);
diff --git a/kernel/sched.c b/kernel/sched.c
index 8388f03..232c1c0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3714,30 +3714,6 @@
}
/*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
- struct task_cputime totals;
- unsigned long flags;
- struct rq *rq;
- u64 ns;
-
- rq = task_rq_lock(p, &flags);
- thread_group_cputime(p, &totals);
- ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
- task_rq_unlock(rq, p, &flags);
-
- return ns;
-}
-
-/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
@@ -4243,9 +4219,9 @@
}
/*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
*/
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
@@ -4286,16 +4262,6 @@
if (to_wakeup)
try_to_wake_up_local(to_wakeup);
}
-
- /*
- * If we are going to sleep and we have plugged IO
- * queued, make sure to submit it to avoid deadlocks.
- */
- if (blk_needs_flush_plug(prev)) {
- raw_spin_unlock(&rq->lock);
- blk_schedule_flush_plug(prev);
- raw_spin_lock(&rq->lock);
- }
}
switch_count = &prev->nvcsw;
}
@@ -4333,6 +4299,26 @@
if (need_resched())
goto need_resched;
}
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+ if (!tsk->state)
+ return;
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+ if (blk_needs_flush_plug(tsk))
+ blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+ struct task_struct *tsk = current;
+
+ sched_submit_work(tsk);
+ __schedule();
+}
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4406,7 +4392,7 @@
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
- schedule();
+ __schedule();
sub_preempt_count_notrace(PREEMPT_ACTIVE);
/*
@@ -4434,7 +4420,7 @@
do {
add_preempt_count(PREEMPT_ACTIVE);
local_irq_enable();
- schedule();
+ __schedule();
local_irq_disable();
sub_preempt_count(PREEMPT_ACTIVE);
@@ -5576,7 +5562,7 @@
static void __cond_resched(void)
{
add_preempt_count(PREEMPT_ACTIVE);
- schedule();
+ __schedule();
sub_preempt_count(PREEMPT_ACTIVE);
}
@@ -7432,6 +7418,7 @@
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
if (sd && (sd->flags & SD_OVERLAP))
free_sched_groups(sd->groups, 0);
+ kfree(*per_cpu_ptr(sdd->sd, j));
kfree(*per_cpu_ptr(sdd->sg, j));
kfree(*per_cpu_ptr(sdd->sgp, j));
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 10d0182..17f2319 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1038,7 +1038,7 @@
*/
if (curr && unlikely(rt_task(curr)) &&
(curr->rt.nr_cpus_allowed < 2 ||
- curr->prio < p->prio) &&
+ curr->prio <= p->prio) &&
(p->rt.nr_cpus_allowed > 1)) {
int target = find_lowest_rq(p);
@@ -1569,7 +1569,7 @@
p->rt.nr_cpus_allowed > 1 &&
rt_task(rq->curr) &&
(rq->curr->rt.nr_cpus_allowed < 2 ||
- rq->curr->prio < p->prio))
+ rq->curr->prio <= p->prio))
push_rt_tasks(rq);
}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e3516b2..0cae1cc 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -132,8 +132,8 @@
cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
}
+DEFINE_MUTEX(stop_cpus_mutex);
/* static data for stop_cpus */
-static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
diff --git a/kernel/sys.c b/kernel/sys.c
index e4128b2..f88dadc 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -38,6 +38,8 @@
#include <linux/fs_struct.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
+#include <linux/version.h>
+#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -45,6 +47,8 @@
#include <linux/user_namespace.h>
#include <linux/kmsg_dump.h>
+/* Move somewhere else to avoid recompiling? */
+#include <generated/utsrelease.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -1124,6 +1128,34 @@
#define override_architecture(name) 0
#endif
+/*
+ * Work around broken programs that cannot handle "Linux 3.0".
+ * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ */
+static int override_release(char __user *release, int len)
+{
+ int ret = 0;
+ char buf[65];
+
+ if (current->personality & UNAME26) {
+ char *rest = UTS_RELEASE;
+ int ndots = 0;
+ unsigned v;
+
+ while (*rest) {
+ if (*rest == '.' && ++ndots >= 3)
+ break;
+ if (!isdigit(*rest) && *rest != '.')
+ break;
+ rest++;
+ }
+ v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+ snprintf(buf, len, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, len);
+ }
+ return ret;
+}
+
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
int errno = 0;
@@ -1133,6 +1165,8 @@
errno = -EFAULT;
up_read(&uts_sem);
+ if (!errno && override_release(name->release, sizeof(name->release)))
+ errno = -EFAULT;
if (!errno && override_architecture(name))
errno = -EFAULT;
return errno;
@@ -1154,6 +1188,8 @@
error = -EFAULT;
up_read(&uts_sem);
+ if (!error && override_release(name->release, sizeof(name->release)))
+ error = -EFAULT;
if (!error && override_architecture(name))
error = -EFAULT;
return error;
@@ -1188,6 +1224,8 @@
if (!error && override_architecture(name))
error = -EFAULT;
+ if (!error && override_release(name->release, sizeof(name->release)))
+ error = -EFAULT;
return error ? -EFAULT : 0;
}
#endif
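
For context, a process opts into the 2.6.40+x mapping implemented by override_release() above by setting the UNAME26 personality bit. A minimal sketch (illustrative only; UNAME26 may be missing from older libc headers, so it is defined locally):

#include <stdio.h>
#include <sys/personality.h>
#include <sys/utsname.h>

#ifndef UNAME26
#define UNAME26 0x0020000
#endif

int main(void)
{
	struct utsname u;

	personality(PER_LINUX | UNAME26);
	uname(&u);
	printf("release: %s\n", u.release);	/* expected "2.6.40" on a 3.0 kernel */
	return 0;
}
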
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 59f369f..ea5e1a9 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -441,6 +441,8 @@
static void alarm_timer_get(struct k_itimer *timr,
struct itimerspec *cur_setting)
{
+ memset(cur_setting, 0, sizeof(struct itimerspec));
+
cur_setting->it_interval =
ktime_to_timespec(timr->it.alarmtimer.period);
cur_setting->it_value =
@@ -479,11 +481,17 @@
if (!rtcdev)
return -ENOTSUPP;
- /* Save old values */
- old_setting->it_interval =
- ktime_to_timespec(timr->it.alarmtimer.period);
- old_setting->it_value =
- ktime_to_timespec(timr->it.alarmtimer.node.expires);
+ /*
+ * XXX HACK! Currently we can DOS a system if the interval
+ * period on alarmtimers is too small. Cap the interval here
+ * to 100us and solve this properly in a future patch! -jstultz
+ */
+ if ((new_setting->it_interval.tv_sec == 0) &&
+ (new_setting->it_interval.tv_nsec < 100000))
+ new_setting->it_interval.tv_nsec = 100000;
+
+ if (old_setting)
+ alarm_timer_get(timr, old_setting);
/* If the timer was already set, cancel it */
alarm_cancel(&timr->it.alarmtimer);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 908038f..ef9271b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1744,10 +1744,36 @@
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
+static int ops_traces_mod(struct ftrace_ops *ops)
+{
+ struct ftrace_hash *hash;
+
+ hash = ops->filter_hash;
+ return !!(!hash || !hash->count);
+}
+
static int ftrace_update_code(struct module *mod)
{
struct dyn_ftrace *p;
cycle_t start, stop;
+ unsigned long ref = 0;
+
+ /*
+ * When adding a module, we need to check if tracers are
+ * currently enabled and if they are set to trace all functions.
+ * If they are, we need to enable the module functions as well
+ * as update the reference counts for those function records.
+ */
+ if (mod) {
+ struct ftrace_ops *ops;
+
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next) {
+ if (ops->flags & FTRACE_OPS_FL_ENABLED &&
+ ops_traces_mod(ops))
+ ref++;
+ }
+ }
start = ftrace_now(raw_smp_processor_id());
ftrace_update_cnt = 0;
@@ -1760,7 +1786,7 @@
p = ftrace_new_addrs;
ftrace_new_addrs = p->newlist;
- p->flags = 0L;
+ p->flags = ref;
/*
* Do the initial record conversion from mcount jump
@@ -1783,7 +1809,7 @@
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up) {
+ if (ftrace_start_up && ref) {
int failed = __ftrace_replace_code(p, 1);
if (failed) {
ftrace_bug(failed, p->ip);
@@ -2407,10 +2433,9 @@
*/
static int
-ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
+ftrace_mod_callback(struct ftrace_hash *hash,
+ char *func, char *cmd, char *param, int enable)
{
- struct ftrace_ops *ops = &global_ops;
- struct ftrace_hash *hash;
char *mod;
int ret = -EINVAL;
@@ -2430,11 +2455,6 @@
if (!strlen(mod))
return ret;
- if (enable)
- hash = ops->filter_hash;
- else
- hash = ops->notrace_hash;
-
ret = ftrace_match_module_records(hash, func, mod);
if (!ret)
ret = -EINVAL;
@@ -2760,7 +2780,7 @@
mutex_lock(&ftrace_cmd_mutex);
list_for_each_entry(p, &ftrace_commands, list) {
if (strcmp(p->name, command) == 0) {
- ret = p->func(func, command, next, enable);
+ ret = p->func(hash, func, command, next, enable);
goto out_unlock;
}
}
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8d0e1cc..c7b0c6a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -324,7 +324,8 @@
}
static int
-ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+ftrace_trace_onoff_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
{
struct ftrace_probe_ops *ops;
void *count = (void *)-1;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553..aec02b6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3026,8 +3026,13 @@
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+ bool drained;
- if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+ spin_lock_irq(&cwq->gcwq->lock);
+ drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+ spin_unlock_irq(&cwq->gcwq->lock);
+
+ if (drained)
continue;
if (++flush_cnt == 10 ||