Merge tag 'v3.4.108' into mm-6.0
Change-Id: I5ee718e5c87c9647c6edf0926a887679e065a649
Signed-off-by: José Adolfo Galdámez <josegalre@pac-rom.com>
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 67b847d..d1342c2 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2598,7 +2598,7 @@
#define K(x) ((x) << (PAGE_SHIFT - 10))
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
"Buffers: %8lu kB\n",
- val.totalram, val.freeram, val.bufferram);
+ K(val.totalram), K(val.freeram), K(val.bufferram));
return 0;
}
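
For context on the kdb change: the sysinfo fields (totalram, freeram, bufferram) are page counts, so the old code printed raw page counts under a "kB" label. K() rescales pages to kilobytes by shifting left by (PAGE_SHIFT - 10), i.e. multiplying by PAGE_SIZE / 1024. A minimal userspace sketch of the conversion, assuming 4 KiB pages (PAGE_SHIFT == 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */

    /* pages -> kB: x * 2^PAGE_SHIFT bytes / 2^10 bytes per kB */
    #define K(x) ((x) << (PAGE_SHIFT - 10))

    int main(void)
    {
        unsigned long totalram = 262144;    /* 1 GiB worth of 4 KiB pages */

        printf("raw pages: %8lu (what the old code printed as kB)\n",
               totalram);
        printf("MemTotal:  %8lu kB\n", K(totalram));   /* 1048576 */
        return 0;
    }
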
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c533d2d..6734f40 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3913,6 +3913,13 @@
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
+ int rctx;
+
+ rctx = perf_swevent_get_recursion_context();
+ /*
+ * If we 'fail' here, that's OK, it means recursion is already disabled
+ * and we won't recurse 'further'.
+ */
if (event->pending_disable) {
event->pending_disable = 0;
@@ -3923,6 +3930,9 @@
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
+
+ if (rctx >= 0)
+ perf_swevent_put_recursion_context(rctx);
}
/*
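
The events/core.c hunks bracket the irq_work handler, perf_pending_event(), with a software-event recursion context, so a swevent raised from inside the handler cannot re-enter it; a failed get (rctx < 0) just means recursion is already disabled at this level, hence the unconditional body and the guarded put. The underlying pattern is a per-context in-use flag. A minimal single-context sketch with hypothetical names (the kernel keeps one flag per CPU for each of task, softirq, hardirq, and NMI context):

    #include <stdio.h>

    static int in_use;                  /* per-CPU, per-context in the kernel */

    static int get_recursion_context(void)
    {
        if (in_use)
            return -1;                  /* already inside: do not recurse */
        in_use = 1;
        return 0;                       /* token handed back to put */
    }

    static void put_recursion_context(int rctx)
    {
        (void)rctx;
        in_use = 0;
    }

    static void handle_event(int depth)
    {
        int rctx = get_recursion_context();

        printf("depth %d: %s\n", depth,
               rctx < 0 ? "suppressed" : "handled");
        if (rctx < 0)
            return;
        if (depth == 0)
            handle_event(1);            /* simulated re-entry */
        put_recursion_context(rctx);
    }

    int main(void)
    {
        handle_event(0);    /* depth 0 handled, depth 1 suppressed */
        return 0;
    }
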
diff --git a/kernel/printk.c b/kernel/printk.c
index afad915..af59a78 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -131,7 +131,7 @@
*/
struct console_cmdline
{
- char name[8]; /* Name of the driver */
+ char name[16]; /* Name of the driver */
int index; /* Minor dev. to use */
char *options; /* Options for the driver */
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
@@ -1581,6 +1581,7 @@
*/
for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
i++) {
+ BUILD_BUG_ON(sizeof(console_cmdline[i].name) != sizeof(newcon->name));
if (strcmp(console_cmdline[i].name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
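
The printk change fixes a size mismatch: struct console declares name[16], but console_cmdline kept name[8], so a console= parameter with a name of eight or more characters could be stored truncated and then fail the strcmp() match against its driver at registration time. Widening the array fixes the comparison, and the BUILD_BUG_ON pins the two sizes together at compile time. A standalone sketch of that idiom (the kernel's real macro lives in <linux/bug.h>; this approximation uses a negative array size to force the error):

    /* Compiles to nothing when cond is false; fails to compile when true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct console         { char name[16]; /* ... */ };
    struct console_cmdline { char name[16]; /* ... */ };

    void check_sizes(struct console *newcon, struct console_cmdline *c)
    {
        /* Breaks the build if the two name fields ever drift apart. */
        BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
    }
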
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 351de0e..9f0ef30 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4069,10 +4069,13 @@
if (running)
p->sched_class->put_prev_task(rq, p);
- if (rt_prio(prio))
+ if (rt_prio(prio)) {
p->sched_class = &rt_sched_class;
- else
+ } else {
+ if (rt_prio(oldprio))
+ p->rt.timeout = 0;
p->sched_class = &fair_sched_class;
+ }
p->prio = prio;
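
In the scheduler hunk, p->rt.timeout is the tick counter behind the RLIMIT_RTTIME watchdog, and it is only meaningful while the task runs in the RT class. The fix clears it when a priority-inheritance boost ends (oldprio was RT, the new prio is not), so ticks cannot accumulate across separate boost periods and trip the limit spuriously. rt_prio() is simply a threshold test on the kernel's inverted priority scale. A simplified sketch of the fixed branch, with a reduced task struct for illustration:

    #include <stdio.h>

    #define MAX_RT_PRIO 100             /* prios 0..99 are realtime */

    static int rt_prio(int prio)        /* lower number = higher prio */
    {
        return prio < MAX_RT_PRIO;
    }

    struct task {
        int prio;
        unsigned long rt_timeout;       /* stands in for p->rt.timeout */
    };

    static void setprio(struct task *p, int prio)
    {
        int oldprio = p->prio;

        if (rt_prio(prio)) {
            /* entering (or staying in) the RT class */
        } else {
            if (rt_prio(oldprio))
                p->rt_timeout = 0;      /* the fix: reset on de-boost */
        }
        p->prio = prio;
    }

    int main(void)
    {
        struct task t = { .prio = 50, .rt_timeout = 1234 };

        setprio(&t, 120);               /* RT -> SCHED_NORMAL range */
        printf("rt_timeout after de-boost: %lu\n", t.rt_timeout);  /* 0 */
        return 0;
    }
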
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 671f959..5cc401e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -194,22 +194,28 @@
EXPORT_SYMBOL(local_bh_enable_ip);
/*
- * We restart softirq processing MAX_SOFTIRQ_RESTART times,
- * and we fall back to softirqd after that.
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+ * but break the loop if need_resched() is set or after 2 ms.
+ * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
+ * certain cases, such as stop_machine(), jiffies may cease to
+ * increment and so we need the MAX_SOFTIRQ_RESTART limit as
+ * well to make sure we eventually return from this method.
*
- * This number has been established via experimentation.
+ * These limits have been established via experimentation.
* The two things to balance are latency and fairness -
* we want to handle softirqs as soon as possible, but they
* should not be able to lock up the box.
*/
+#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
struct softirq_action *h;
__u32 pending;
- int max_restart = MAX_SOFTIRQ_RESTART;
+ unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
int cpu;
+ int max_restart = MAX_SOFTIRQ_RESTART;
pending = local_softirq_pending();
account_system_vtime(current);
@@ -255,11 +261,13 @@
local_irq_disable();
pending = local_softirq_pending();
- if (pending && --max_restart)
- goto restart;
+ if (pending) {
+ if (time_before(jiffies, end) && !need_resched() &&
+ --max_restart)
+ goto restart;
- if (pending)
wakeup_softirqd();
+ }
lockdep_softirq_exit();
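
The softirq rework bounds processing by wall time as well as by restart count: the loop now bails out to ksoftirqd once 2 ms have elapsed or need_resched() is set, instead of always looping up to ten full passes. The jiffies deadline relies on time_before(), which compares via signed subtraction so it stays correct when jiffies wraps around. A simplified demonstration (the real macro in <linux/jiffies.h> adds type checking; this assumes the usual two's-complement conversion):

    #include <stdio.h>

    /* true if a is (cyclically) before b, even across wraparound */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    int main(void)
    {
        unsigned long jiffies = (unsigned long)-2;  /* about to wrap */
        unsigned long end = jiffies + 5;            /* wraps to 3 */

        printf("naive <     : %d\n", jiffies < end);             /* 0: wrong */
        printf("time_before : %d\n", time_before(jiffies, end)); /* 1: right */
        return 0;
    }
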
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b6bd45..ffea326 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1014,6 +1014,12 @@
return ret;
}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2056,24 +2062,36 @@
static void ftrace_startup_sysctl(void)
{
+ int command;
+
if (unlikely(ftrace_disabled))
return;
/* Force update next time */
saved_ftrace_func = NULL;
/* ftrace_start_up is true if we want ftrace running */
- if (ftrace_start_up)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (ftrace_start_up) {
+ command = FTRACE_UPDATE_CALLS;
+ if (ftrace_graph_active)
+ command |= FTRACE_START_FUNC_RET;
+ ftrace_startup_enable(command);
+ }
}
static void ftrace_shutdown_sysctl(void)
{
+ int command;
+
if (unlikely(ftrace_disabled))
return;
/* ftrace_start_up is true if ftrace is running */
- if (ftrace_start_up)
- ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+ if (ftrace_start_up) {
+ command = FTRACE_DISABLE_CALLS;
+ if (ftrace_graph_active)
+ command |= FTRACE_STOP_FUNC_RET;
+ ftrace_run_update_code(command);
+ }
}
static cycle_t ftrace_update_time;
@@ -4445,12 +4463,12 @@
if (ftrace_enabled) {
- ftrace_startup_sysctl();
-
/* we are starting ftrace again */
if (ftrace_ops_list != &ftrace_list_end)
update_ftrace_function();
+ ftrace_startup_sysctl();
+
} else {
/* stopping ftrace calls (just send to ftrace_stub) */
ftrace_trace_function = ftrace_stub;
@@ -4465,7 +4483,6 @@
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
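
Finally, the ftrace hunks hoist ftrace_graph_active out of the CONFIG_FUNCTION_GRAPH_TRACER-only section (note the matching removal in the last hunk) and give it a constant-0 fallback, so the sysctl paths can OR in FTRACE_START_FUNC_RET / FTRACE_STOP_FUNC_RET without sprinkling #ifdefs: when the option is off, the "if (ftrace_graph_active)" branches are still parsed and type-checked but fold away as dead code. A minimal sketch of that idiom:

    #include <stdio.h>

    /* #define CONFIG_FEATURE */        /* toggle to try both builds */

    #ifdef CONFIG_FEATURE
    static int feature_active = 1;      /* real, mutable state */
    #else
    # define feature_active 0           /* constant: branch folds away */
    #endif

    int main(void)
    {
        int command = 0x1;

        /* No #ifdef needed at the use site; the compiler still
         * type-checks this branch, then eliminates it when the
         * macro form makes the condition constant false. */
        if (feature_active)
            command |= 0x2;

        printf("command = %#x\n", command);
        return 0;
    }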