Merge remote-tracking branch 'common/android-3.0' into msm-3.0
* common/android-3.0: (570 commits)
misc: remove kernel debugger core
ARM: common: fiq_debugger: dump sysrq directly to console if enabled
ARM: common: fiq_debugger: add irq context debug functions
net: wireless: bcmdhd: Call init_ioctl() only if was started properly for WEXT
net: wireless: bcmdhd: Call init_ioctl() only if was started properly
net: wireless: bcmdhd: Fix possible memory leak in escan/iscan
cpufreq: interactive governor: default 20ms timer
cpufreq: interactive governor: go to intermediate hi speed before max
cpufreq: interactive governor: scale to max only if at min speed
cpufreq: interactive governor: apply intermediate load on current speed
ARM: idle: update idle ticks before call idle end notifier
input: gpio_input: don't print debounce message unless flag is set
net: wireless: bcm4329: Skip dhd_bus_stop() if bus is already down
net: wireless: bcmdhd: Skip dhd_bus_stop() if bus is already down
net: wireless: bcmdhd: Improve suspend/resume processing
net: wireless: bcmdhd: Check if FW is Ok for internal FW call
tcp: Don't nuke connections for the wrong protocol
ARM: common: fiq_debugger: make uart irq be no_suspend
net: wireless: Skip connect warning for CONFIG_CFG80211_ALLOW_RECONNECT
mm: avoid livelock on !__GFP_FS allocations
...
Conflicts:
arch/arm/mm/cache-l2x0.c
arch/arm/vfp/vfpmodule.c
drivers/mmc/core/host.c
kernel/power/wakelock.c
net/bluetooth/hci_event.c
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f34719f..34ffd9e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1344,6 +1344,20 @@
source "drivers/pcmcia/Kconfig"
+config ARM_ERRATA_764369
+ bool "ARM errata: Data cache line maintenance operation by MVA may not succeed"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for erratum 764369
+ affecting Cortex-A9 MPCore with two or more processors (all
+ current revisions). Under certain timing circumstances, a data
+ cache line maintenance operation by MVA targeting an Inner
+ Shareable memory region may fail to proceed up to either the
+ Point of Coherency or to the Point of Unification of the
+ system. This workaround adds a DSB instruction before the
+ relevant cache maintenance functions and sets a specific bit
+ in the diagnostic control register of the SCU.
+
endmenu
menu "Kernel Features"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 66ed0c3..ad180b2 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -49,12 +49,11 @@
bool "FIQ Mode Serial Debugger"
select FIQ
select FIQ_GLUE
- select KERNEL_DEBUGGER_CORE
default n
help
The FIQ serial debugger can accept commands even when the
kernel is unresponsive due to being stuck with interrupts
- disabled. Depends on the kernel debugger core in drivers/misc.
+ disabled.
config FIQ_DEBUGGER_NO_SLEEP
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
index 080f69e..3ed18ae 100644
--- a/arch/arm/common/fiq_debugger.c
+++ b/arch/arm/common/fiq_debugger.c
@@ -22,12 +22,12 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/kernel_debugger.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/smp.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
@@ -53,6 +53,7 @@
struct fiq_glue_handler handler;
int fiq;
+ int uart_irq;
int signal_irq;
int wakeup_irq;
bool wakeup_irq_no_set_wake;
@@ -71,7 +72,8 @@
bool debug_enable;
bool ignore_next_wakeup_irq;
struct timer_list sleep_timer;
- bool uart_clk_enabled;
+ spinlock_t sleep_timer_lock;
+ bool uart_enabled;
struct wake_lock debugger_wake_lock;
bool console_enable;
int current_cpu;
@@ -84,6 +86,7 @@
struct tty_struct *tty;
int tty_open_count;
struct fiq_debugger_ringbuf *tty_rbuf;
+ bool syslog_dumping;
#endif
unsigned int last_irqs[NR_IRQS];
@@ -130,18 +133,42 @@
}
#endif
+static inline bool debug_have_fiq(struct fiq_debugger_state *state)
+{
+ return (state->fiq >= 0);
+}
+
static void debug_force_irq(struct fiq_debugger_state *state)
{
unsigned int irq = state->signal_irq;
- if (state->pdata->force_irq)
+
+ if (WARN_ON(!debug_have_fiq(state)))
+ return;
+ if (state->pdata->force_irq) {
state->pdata->force_irq(state->pdev, irq);
- else {
+ } else {
struct irq_chip *chip = irq_get_chip(irq);
if (chip && chip->irq_retrigger)
chip->irq_retrigger(irq_get_irq_data(irq));
}
}
+static void debug_uart_enable(struct fiq_debugger_state *state)
+{
+ if (state->clk)
+ clk_enable(state->clk);
+ if (state->pdata->uart_enable)
+ state->pdata->uart_enable(state->pdev);
+}
+
+static void debug_uart_disable(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_disable)
+ state->pdata->uart_disable(state->pdev);
+ if (state->clk)
+ clk_disable(state->clk);
+}
+
static void debug_uart_flush(struct fiq_debugger_state *state)
{
if (state->pdata->uart_flush)
@@ -447,6 +474,81 @@
tail = user_backtrace(state, tail);
}
+static void do_ps(struct fiq_debugger_state *state)
+{
+ struct task_struct *g;
+ struct task_struct *p;
+ unsigned task_state;
+ static const char stat_nam[] = "RSDTtZX";
+
+ debug_printf(state, "pid ppid prio task pc\n");
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ task_state = p->state ? __ffs(p->state) + 1 : 0;
+ debug_printf(state,
+ "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+ debug_printf(state, "%-13.13s %c", p->comm,
+ task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+ if (task_state == TASK_RUNNING)
+ debug_printf(state, " running\n");
+ else
+ debug_printf(state, " %08lx\n", thread_saved_pc(p));
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = true;
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ char buf[128];
+ int ret;
+ int idx = 0;
+
+ while (1) {
+ ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
+ if (ret <= 0)
+ break;
+ buf[ret] = 0;
+ debug_printf(state, "%s", buf);
+ idx += ret;
+ }
+}
+#endif
+
+static void do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+ begin_syslog_dump(state);
+ handle_sysrq(rq);
+ end_syslog_dump(state);
+}
+
+/* This function CANNOT be called in FIQ context */
+static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+ if (!strcmp(cmd, "ps"))
+ do_ps(state);
+ if (!strcmp(cmd, "sysrq"))
+ do_sysrq(state, 'h');
+ if (!strncmp(cmd, "sysrq ", 6))
+ do_sysrq(state, cmd[6]);
+}
+
static void debug_help(struct fiq_debugger_state *state)
{
debug_printf(state, "FIQ Debugger commands:\n"
@@ -463,16 +565,34 @@
" console Switch terminal to console\n"
" cpu Current CPU\n"
" cpu <number> Switch to CPU<number>\n");
- if (!state->debug_busy) {
- strcpy(state->debug_cmd, "help");
- state->debug_busy = 1;
- debug_force_irq(state);
- }
+ debug_printf(state, " ps Process list\n"
+ " sysrq sysrq options\n"
+ " sysrq <param> Execute sysrq with <param>\n");
}
-static void debug_exec(struct fiq_debugger_state *state,
+static void take_affinity(void *info)
+{
+ struct fiq_debugger_state *state = info;
+ struct cpumask cpumask;
+
+ cpumask_clear(&cpumask);
+ cpumask_set_cpu(get_cpu(), &cpumask);
+
+ irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+ if (!debug_have_fiq(state))
+ smp_call_function_single(cpu, take_affinity, state, false);
+ state->current_cpu = cpu;
+}
+
+static bool debug_fiq_exec(struct fiq_debugger_state *state,
const char *cmd, unsigned *regs, void *svc_sp)
{
+ bool signal_helper = false;
+
if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
debug_help(state);
} else if (!strcmp(cmd, "pc")) {
@@ -494,8 +614,10 @@
debug_printf(state, "%s\n", linux_banner);
} else if (!strcmp(cmd, "sleep")) {
state->no_sleep = false;
+ debug_printf(state, "enabling sleep\n");
} else if (!strcmp(cmd, "nosleep")) {
state->no_sleep = true;
+ debug_printf(state, "disabling sleep\n");
} else if (!strcmp(cmd, "console")) {
state->console_enable = true;
debug_printf(state, "console mode\n");
@@ -504,7 +626,7 @@
} else if (!strncmp(cmd, "cpu ", 4)) {
unsigned long cpu = 0;
if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
- state->current_cpu = cpu;
+ switch_cpu(state, cpu);
else
debug_printf(state, "invalid cpu\n");
debug_printf(state, "cpu %d\n", state->current_cpu);
@@ -518,30 +640,49 @@
state->debug_busy = 1;
}
- debug_force_irq(state);
-
- return;
+ return true;
}
if (!state->console_enable)
debug_prompt(state);
+
+ return signal_helper;
}
static void sleep_timer_expired(unsigned long data)
{
struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+ unsigned long flags;
- if (state->uart_clk_enabled && !state->no_sleep) {
- if (state->debug_enable) {
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->uart_enabled && !state->no_sleep) {
+ if (state->debug_enable && !state->console_enable) {
state->debug_enable = false;
debug_printf_nfiq(state, "suspending fiq debugger\n");
}
state->ignore_next_wakeup_irq = true;
- if (state->clk)
- clk_disable(state->clk);
- state->uart_clk_enabled = false;
+ debug_uart_disable(state);
+ state->uart_enabled = false;
enable_wakeup_irq(state);
}
wake_unlock(&state->debugger_wake_lock);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void handle_wakeup(struct fiq_debugger_state *state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+ state->ignore_next_wakeup_irq = false;
+ } else if (!state->uart_enabled) {
+ wake_lock(&state->debugger_wake_lock);
+ debug_uart_enable(state);
+ state->uart_enabled = true;
+ disable_wakeup_irq(state);
+ mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+ }
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
}
static irqreturn_t wakeup_irq_handler(int irq, void *dev)
@@ -550,35 +691,28 @@
if (!state->no_sleep)
debug_puts(state, "WAKEUP\n");
- if (state->ignore_next_wakeup_irq)
- state->ignore_next_wakeup_irq = false;
- else if (!state->uart_clk_enabled) {
- wake_lock(&state->debugger_wake_lock);
- if (state->clk)
- clk_enable(state->clk);
- state->uart_clk_enabled = true;
- disable_wakeup_irq(state);
- mod_timer(&state->sleep_timer, jiffies + HZ / 2);
- }
+ handle_wakeup(state);
+
return IRQ_HANDLED;
}
-static irqreturn_t debug_irq(int irq, void *dev)
-{
- struct fiq_debugger_state *state = dev;
- if (state->pdata->force_irq_ack)
- state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+static void debug_handle_irq_context(struct fiq_debugger_state *state)
+{
if (!state->no_sleep) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
wake_lock(&state->debugger_wake_lock);
mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
}
#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
if (state->tty) {
int i;
int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
for (i = 0; i < count; i++) {
- int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, i);
+ int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
tty_insert_flip_char(state->tty, c, TTY_NORMAL);
if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
pr_warn("fiq tty failed to consume byte\n");
@@ -587,16 +721,10 @@
}
#endif
if (state->debug_busy) {
- struct kdbg_ctxt ctxt;
-
- ctxt.printf = debug_printf_nfiq;
- ctxt.cookie = state;
- kernel_debugger(&ctxt, state->debug_cmd);
+ debug_irq_exec(state, state->debug_cmd);
debug_prompt(state);
-
state->debug_busy = 0;
}
- return IRQ_HANDLED;
}
static int debug_getc(struct fiq_debugger_state *state)
@@ -604,30 +732,29 @@
return state->pdata->uart_getc(state->pdev);
}
-static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
+ int this_cpu, void *regs, void *svc_sp)
{
- struct fiq_debugger_state *state =
- container_of(h, struct fiq_debugger_state, handler);
int c;
static int last_c;
int count = 0;
- unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+ bool signal_helper = false;
if (this_cpu != state->current_cpu) {
if (state->in_fiq)
- return;
+ return false;
if (atomic_inc_return(&state->unhandled_fiq_count) !=
MAX_UNHANDLED_FIQ_COUNT)
- return;
+ return false;
debug_printf(state, "fiq_debugger: cpu %d not responding, "
"reverting to cpu %d\n", state->current_cpu,
this_cpu);
atomic_set(&state->unhandled_fiq_count, 0);
- state->current_cpu = this_cpu;
- return;
+ switch_cpu(state, this_cpu);
+ return false;
}
state->in_fiq = true;
@@ -648,7 +775,7 @@
#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
} else if (state->console_enable && state->tty_rbuf) {
fiq_debugger_ringbuf_push(state->tty_rbuf, c);
- debug_force_irq(state);
+ signal_helper = true;
#endif
} else if ((c >= ' ') && (c < 127)) {
if (state->debug_count < (DEBUG_MAX - 1)) {
@@ -670,8 +797,9 @@
if (state->debug_count) {
state->debug_buf[state->debug_count] = 0;
state->debug_count = 0;
- debug_exec(state, state->debug_buf,
- regs, svc_sp);
+ signal_helper |=
+ debug_fiq_exec(state, state->debug_buf,
+ regs, svc_sp);
} else {
debug_prompt(state);
}
@@ -684,10 +812,63 @@
/* poke sleep timer if necessary */
if (state->debug_enable && !state->no_sleep)
- debug_force_irq(state);
+ signal_helper = true;
atomic_set(&state->unhandled_fiq_count, 0);
state->in_fiq = false;
+
+ return signal_helper;
+}
+
+static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+ bool need_irq;
+
+ need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
+ if (need_irq)
+ debug_force_irq(state);
+}
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t debug_uart_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+ bool not_done;
+
+ handle_wakeup(state);
+
+ /* handle the debugger irq in regular context */
+ not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
+ get_irq_regs(),
+ current_thread_info());
+ if (not_done)
+ debug_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t debug_signal_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (state->pdata->force_irq_ack)
+ state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+ debug_handle_irq_context(state);
+
+ return IRQ_HANDLED;
}
static void debug_resume(struct fiq_glue_handler *h)
@@ -714,15 +895,17 @@
state = container_of(co, struct fiq_debugger_state, console);
- if (!state->console_enable)
+ if (!state->console_enable && !state->syslog_dumping)
return;
+ debug_uart_enable(state);
while (count--) {
if (*s == '\n')
state->pdata->uart_putc(state->pdev, '\r');
state->pdata->uart_putc(state->pdev, *s++);
}
debug_uart_flush(state);
+ debug_uart_disable(state);
}
static struct console fiq_debugger_console = {
@@ -759,12 +942,10 @@
if (!state->console_enable)
return count;
- if (state->clk)
- clk_enable(state->clk);
+ debug_uart_enable(state);
for (i = 0; i < count; i++)
state->pdata->uart_putc(state->pdev, *buf++);
- if (state->clk)
- clk_disable(state->clk);
+ debug_uart_disable(state);
return count;
}
@@ -829,18 +1010,51 @@
}
#endif
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_suspend)
+ return state->pdata->uart_dev_suspend(pdev);
+ return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_resume)
+ return state->pdata->uart_dev_resume(pdev);
+ return 0;
+}
+
static int fiq_debugger_probe(struct platform_device *pdev)
{
int ret;
struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
struct fiq_debugger_state *state;
+ int fiq;
+ int uart_irq;
- if (!pdata->uart_getc || !pdata->uart_putc || !pdata->fiq_enable)
+ if (!pdata->uart_getc || !pdata->uart_putc)
+ return -EINVAL;
+ if ((pdata->uart_enable && !pdata->uart_disable) ||
+ (!pdata->uart_enable && pdata->uart_disable))
+ return -EINVAL;
+
+ fiq = platform_get_irq_byname(pdev, "fiq");
+ uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+ /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+ * is required */
+ if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+ return -EINVAL;
+ if (fiq >= 0 && !pdata->fiq_enable)
return -EINVAL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
- state->handler.fiq = debug_fiq;
- state->handler.resume = debug_resume;
setup_timer(&state->sleep_timer, sleep_timer_expired,
(unsigned long)state);
state->pdata = pdata;
@@ -849,11 +1063,16 @@
state->debug_enable = initial_debug_enable;
state->console_enable = initial_console_enable;
- state->fiq = platform_get_irq_byname(pdev, "fiq");
+ state->fiq = fiq;
+ state->uart_irq = uart_irq;
state->signal_irq = platform_get_irq_byname(pdev, "signal");
state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
- if (state->wakeup_irq < 0)
+ platform_set_drvdata(pdev, state);
+
+ spin_lock_init(&state->sleep_timer_lock);
+
+ if (state->wakeup_irq < 0 && debug_have_fiq(state))
state->no_sleep = true;
state->ignore_next_wakeup_irq = !state->no_sleep;
@@ -864,6 +1083,10 @@
if (IS_ERR(state->clk))
state->clk = NULL;
+ /* do not call pdata->uart_enable here since uart_init may still
+ * need to do some initialization before uart_enable can work.
+ * So, only try to manage the clock during init.
+ */
if (state->clk)
clk_enable(state->clk);
@@ -876,21 +1099,39 @@
debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
state->no_sleep ? "" : "twice ");
- ret = fiq_glue_register_handler(&state->handler);
- if (ret) {
- pr_err("serial_debugger: could not install fiq handler\n");
- goto err_register_fiq;
- }
+ if (debug_have_fiq(state)) {
+ state->handler.fiq = debug_fiq;
+ state->handler.resume = debug_resume;
+ ret = fiq_glue_register_handler(&state->handler);
+ if (ret) {
+ pr_err("%s: could not install fiq handler\n", __func__);
+ goto err_register_fiq;
+ }
- pdata->fiq_enable(pdev, state->fiq, 1);
+ pdata->fiq_enable(pdev, state->fiq, 1);
+ } else {
+ ret = request_irq(state->uart_irq, debug_uart_irq,
+ IRQF_NO_SUSPEND, "debug", state);
+ if (ret) {
+ pr_err("%s: could not install irq handler\n", __func__);
+ goto err_register_irq;
+ }
+
+ /* for irq-only mode, we want this irq to wake us up, if it
+ * can.
+ */
+ enable_irq_wake(state->uart_irq);
+ }
if (state->clk)
clk_disable(state->clk);
- ret = request_irq(state->signal_irq, debug_irq,
- IRQF_TRIGGER_RISING, "debug", state);
- if (ret)
- pr_err("serial_debugger: could not install signal_irq");
+ if (state->signal_irq >= 0) {
+ ret = request_irq(state->signal_irq, debug_signal_irq,
+ IRQF_TRIGGER_RISING, "debug-signal", state);
+ if (ret)
+ pr_err("serial_debugger: could not install signal_irq");
+ }
if (state->wakeup_irq >= 0) {
ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
@@ -910,7 +1151,7 @@
}
}
if (state->no_sleep)
- wakeup_irq_handler(state->wakeup_irq, state);
+ handle_wakeup(state);
#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
state->console = fiq_debugger_console;
@@ -919,19 +1160,32 @@
#endif
return 0;
+err_register_irq:
err_register_fiq:
if (pdata->uart_free)
pdata->uart_free(pdev);
err_uart_init:
- kfree(state);
+ if (state->clk)
+ clk_disable(state->clk);
if (state->clk)
clk_put(state->clk);
+ wake_lock_destroy(&state->debugger_wake_lock);
+ platform_set_drvdata(pdev, NULL);
+ kfree(state);
return ret;
}
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+ .suspend = fiq_debugger_dev_suspend,
+ .resume = fiq_debugger_dev_resume,
+};
+
static struct platform_driver fiq_debugger_driver = {
- .probe = fiq_debugger_probe,
- .driver.name = "fiq_debugger",
+ .probe = fiq_debugger_probe,
+ .driver = {
+ .name = "fiq_debugger",
+ .pm = &fiq_debugger_dev_pm_ops,
+ },
};
static int __init fiq_debugger_init(void)
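
Note: the flip-buffer loop in debug_handle_irq_context() now peeks at offset 0 on every pass because each iteration consumes one byte and the ring head advances; peeking at the loop index i would skip bytes once consumption starts. A standalone sketch of that drain pattern, using a hypothetical minimal ring buffer rather than the driver's fiq_debugger_ringbuf:

/* Standalone sketch of the peek(0)/consume(1) drain pattern; rb here
 * is a hypothetical minimal ring buffer, not fiq_debugger_ringbuf. */
#include <stdio.h>

struct rb {
	char buf[8];
	int head, len;
};

static int rb_peek(struct rb *rb, int i)
{
	return rb->buf[(rb->head + i) % 8];
}

static void rb_consume(struct rb *rb, int n)
{
	rb->head = (rb->head + n) % 8;
	rb->len -= n;
}

int main(void)
{
	struct rb rb = { .buf = "FIQ!", .head = 0, .len = 4 };
	int i, count = rb.len;

	/* Peeking at a fixed index i while consuming one byte per pass
	 * would skip bytes; peek offset 0 tracks the moving head. */
	for (i = 0; i < count; i++) {
		putchar(rb_peek(&rb, 0));
		rb_consume(&rb, 1);
	}
	putchar('\n');
	return 0;
}
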
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
index e711b57..4d27488 100644
--- a/arch/arm/include/asm/fiq_debugger.h
+++ b/arch/arm/include/asm/fiq_debugger.h
@@ -27,6 +27,19 @@
#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume: used to restore uart state right before enabling
+ * the fiq.
+ * @uart_enable: Do the work necessary to communicate with the uart
+ * hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable: Do the work necessary to disable the uart hw
+ * (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend: called during PM suspend; generally not needed
+ * for a real fiq-mode debugger.
+ * @uart_dev_resume: called during PM resume; generally not needed
+ * for a real fiq-mode debugger.
+ */
struct fiq_debugger_pdata {
int (*uart_init)(struct platform_device *pdev);
void (*uart_free)(struct platform_device *pdev);
@@ -34,6 +47,11 @@
int (*uart_getc)(struct platform_device *pdev);
void (*uart_putc)(struct platform_device *pdev, unsigned int c);
void (*uart_flush)(struct platform_device *pdev);
+ void (*uart_enable)(struct platform_device *pdev);
+ void (*uart_disable)(struct platform_device *pdev);
+
+ int (*uart_dev_suspend)(struct platform_device *pdev);
+ int (*uart_dev_resume)(struct platform_device *pdev);
void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
bool enable);
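
Note: a hedged sketch of how a board file might supply the two new hooks; every my_-prefixed name is hypothetical, and only the fiq_debugger_pdata fields come from the header above. clk_enable()/clk_disable() are ref-counted, which is what the kernel-doc requires of these callbacks:

/* Hypothetical board-side sketch of the new hooks; all my_* names are
 * illustrative. The clk calls are ref-counted, per the kernel-doc. */
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <asm/fiq_debugger.h>

static struct clk *my_uart_clk;		/* obtained with clk_get() at init */

static void my_uart_enable(struct platform_device *pdev)
{
	clk_enable(my_uart_clk);
}

static void my_uart_disable(struct platform_device *pdev)
{
	clk_disable(my_uart_clk);
}

static int my_uart_getc(struct platform_device *pdev)
{
	return -1;			/* stub: no character available */
}

static void my_uart_putc(struct platform_device *pdev, unsigned int c)
{
}

static struct fiq_debugger_pdata my_fiq_debugger_pdata = {
	.uart_getc	= my_uart_getc,
	.uart_putc	= my_uart_putc,
	.uart_enable	= my_uart_enable,
	.uart_disable	= my_uart_disable,
};
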
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 8c73900..253cc86 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -25,17 +25,17 @@
#ifdef CONFIG_SMP
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
__asm__ __volatile__( \
- "1: ldrex %1, [%2]\n" \
+ "1: ldrex %1, [%3]\n" \
" " insn "\n" \
- "2: strex %1, %0, [%2]\n" \
- " teq %1, #0\n" \
+ "2: strex %2, %0, [%3]\n" \
+ " teq %2, #0\n" \
" bne 1b\n" \
" mov %0, #0\n" \
- __futex_atomic_ex_table("%4") \
- : "=&r" (ret), "=&r" (oldval) \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@@ -73,14 +73,14 @@
#include <linux/preempt.h>
#include <asm/domain.h>
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
__asm__ __volatile__( \
- "1: " T(ldr) " %1, [%2]\n" \
+ "1: " T(ldr) " %1, [%3]\n" \
" " insn "\n" \
- "2: " T(str) " %0, [%2]\n" \
+ "2: " T(str) " %0, [%3]\n" \
" mov %0, #0\n" \
- __futex_atomic_ex_table("%4") \
- : "=&r" (ret), "=&r" (oldval) \
+ __futex_atomic_ex_table("%5") \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
@@ -117,7 +117,7 @@
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
+ int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -129,19 +129,19 @@
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
+ __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg);
break;
default:
ret = -ENOSYS;
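
Note: the rework above adds a dedicated tmp output so the strex status register no longer clobbers the loaded oldval; the operands renumber to %0 ret, %1 oldval, %2 tmp, %3 uaddr, %4 oparg, %5 fault entry. The C code that unpacks encoded_op is unchanged; a runnable sketch of that unpacking (the signed left shifts mirror the kernel's code and assume two's-complement behavior):

/* Runnable sketch of the encoded_op unpacking shown above. */
#include <stdio.h>

int main(void)
{
	/* FUTEX_OP(op, oparg, cmp, cmparg): four fields in one int */
	int encoded_op = (1 << 28) | (1 << 12);	/* FUTEX_OP_ADD, oparg 1, CMP_EQ, cmparg 0 */
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;	/* sign-extends bits 23..12 */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extends bits 11..0 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
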
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2635c8b..41c8c06 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 1fc2f49..49106e1 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -57,6 +57,7 @@
#define L2X0_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
+#define L2X0_CACHE_ID_REV_MASK (0x3f)
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L220 (2 << 6)
@@ -65,7 +66,7 @@
#define L2X0_AUX_CTRL_MASK 0xc0000fff
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
@@ -74,6 +75,8 @@
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
#define L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT 20
+#define REV_PL310_R2P0 4
+
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
extern void l2x0_suspend(void);
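
Note: widening the way-size mask from 0x3 to 0x7 at bit 17 exposes the full three-bit AUX_CTRL way-size field; the cache-l2x0.c hunk below consumes it as way_size = SZ_1K << (field + 3). A runnable sketch of that arithmetic:

/* Runnable sketch of the PL310 size math from cache-l2x0.c below. */
#include <stdio.h>

#define SZ_1K		1024
#define CACHE_LINE_SIZE	32

int main(void)
{
	unsigned int aux = (0x3 << 17) | (1 << 16);	/* way-size code 3, 16-way L310 */
	unsigned int ways = (aux & (1 << 16)) ? 16 : 8;
	unsigned int way_size = SZ_1K << (((aux >> 17) & 0x7) + 3);
	unsigned int sets = way_size / CACHE_LINE_SIZE;

	printf("%u ways x %u KB = %u KB, %u sets per way\n",
	       ways, way_size / SZ_1K, ways * way_size / SZ_1K, sets);
	return 0;
}
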
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 5a526af..a565633 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -26,6 +26,9 @@
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
#endif
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 787b12d..a6dadf0 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -98,4 +98,6 @@
*/
extern void show_local_irqs(struct seq_file *, int);
+extern void smp_send_all_cpu_backtrace(void);
+
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 9633178..462aefb 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -307,8 +307,8 @@
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 26959d5..4ee00c8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -62,6 +62,18 @@
#include <mach/system.h>
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+ smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+ dump_stack();
+}
+#endif
+
void disable_hlt(void)
{
hlt_counter++;
@@ -239,8 +251,8 @@
local_irq_enable();
}
}
- idle_notifier_call_chain(IDLE_END);
tick_nohz_restart_sched_tick();
+ idle_notifier_call_chain(IDLE_END);
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1455efe..9e217ad 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -54,6 +54,7 @@
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_CPU_BACKTRACE,
};
int __cpuinit __cpu_up(unsigned int cpu)
@@ -407,6 +408,7 @@
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -557,6 +559,58 @@
cpu_relax();
}
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ int i;
+
+ if (test_and_set_bit(0, &backtrace_flag))
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't output a second, duplicate dump.
+ */
+ return;
+
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+ cpu_clear(this_cpu, backtrace_mask);
+
+ pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+ dump_stack();
+
+ pr_info("\nsending IPI to all other CPUs:\n");
+ smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+ /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(&backtrace_mask))
+ break;
+ mdelay(1);
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+ if (cpu_isset(cpu, backtrace_mask)) {
+ raw_spin_lock(&backtrace_lock);
+ pr_warning("IPI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ raw_spin_unlock(&backtrace_lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -597,6 +651,10 @@
ipi_cpu_stop(cpu);
break;
+ case IPI_CPU_BACKTRACE:
+ ipi_cpu_backtrace(cpu, regs);
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
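
Note: test_and_set_bit()/clear_bit() on backtrace_flag form an in-progress latch so concurrent triggers produce a single dump. A user-space analog of the same pattern with C11 atomics, for illustration only:

/* User-space analog of the backtrace_flag guard above, with C11
 * atomics standing in for test_and_set_bit()/clear_bit(). */
#include <stdio.h>
#include <stdatomic.h>

static atomic_flag backtrace_flag = ATOMIC_FLAG_INIT;

static void trigger_all_cpu_backtrace(void)
{
	if (atomic_flag_test_and_set(&backtrace_flag))
		return;		/* a dump is already in progress */

	printf("dumping backtraces...\n");
	atomic_flag_clear(&backtrace_flag);
}

int main(void)
{
	trigger_all_cpu_backtrace();
	trigger_all_cpu_backtrace();	/* runs again once the first completed */
	return 0;
}
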
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index a1e757c..cb7dd40 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -13,6 +13,7 @@
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
+#include <asm/cputype.h>
#define SCU_CTRL 0x00
#define SCU_CONFIG 0x04
@@ -36,6 +37,15 @@
{
u32 scu_ctrl;
+#ifdef CONFIG_ARM_ERRATA_764369
+ /* Cortex-A9 only */
+ if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+ scu_ctrl = __raw_readl(scu_base + 0x30);
+ if (!(scu_ctrl & 1))
+ __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+ }
+#endif
+
scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index a7b41bf..e83cc86 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -115,6 +115,32 @@
},
};
+#ifdef CONFIG_MTD
+static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
+{
+ char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
+ size_t retlen;
+
+ if (!strcmp(mtd->name, "MAC-Address")) {
+ mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
+ if (retlen == ETH_ALEN)
+ pr_info("Read MAC addr from SPI Flash: %pM\n",
+ mac_addr);
+ }
+}
+
+static struct mtd_notifier da850evm_spi_notifier = {
+ .add = da850_evm_m25p80_notify_add,
+};
+
+static void da850_evm_setup_mac_addr(void)
+{
+ register_mtd_user(&da850evm_spi_notifier);
+}
+#else
+static void da850_evm_setup_mac_addr(void) { }
+#endif
+
static struct mtd_partition da850_evm_norflash_partition[] = {
{
.name = "bootloaders + env",
@@ -1237,6 +1263,8 @@
if (ret)
pr_warning("da850_evm_init: spi 1 registration failed: %d\n",
ret);
+
+ da850_evm_setup_mac_addr();
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index fb5e72b..5f1e045 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -217,7 +217,11 @@
ENDPROC(davinci_ddr_psc_config)
CACHE_FLUSH:
- .word arm926_flush_kern_cache_all
+#ifdef CONFIG_CPU_V6
+ .word v6_flush_kern_cache_all
+#else
+ .word arm926_flush_kern_cache_all
+#endif
ENTRY(davinci_cpu_suspend_sz)
.word . - davinci_cpu_suspend
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 5ed51b8..cf7e598 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -160,7 +160,7 @@
void __init dove_spi1_init(void)
{
- orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+ orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
}
/*****************************************************************************
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2fbbdd5..fcf0ae9 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -337,15 +337,15 @@
static void integrator_clocksource_init(u32 khz)
{
void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
- u32 ctrl = TIMER_CTRL_ENABLE;
+ u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
if (khz >= 1500) {
khz /= 16;
- ctrl = TIMER_CTRL_DIV16;
+ ctrl |= TIMER_CTRL_DIV16;
}
- writel(ctrl, base + TIMER_CTRL);
writel(0xffff, base + TIMER_LOAD);
+ writel(ctrl, base + TIMER_CTRL);
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
khz * 1000, 200, 16, clocksource_mmio_readl_down);
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index f8b9392..9a9706c 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -6,6 +6,7 @@
select ARM_GIC
select HAS_MTU
select ARM_ERRATA_753970
+ select ARM_ERRATA_754322
menu "Ux500 SoC"
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 95a7079..7aacb84 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,16 @@
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
+static u32 l2x0_cache_id;
+static unsigned int l2x0_sets;
+static unsigned int l2x0_ways;
+
+static inline bool is_pl310_rev(int rev)
+{
+ return (l2x0_cache_id &
+ (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
+ (L2X0_CACHE_ID_PART_L310 | rev);
+}
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
@@ -119,6 +129,23 @@
cache_sync();
}
+#ifdef CONFIG_PL310_ERRATA_727915
+static void l2x0_for_each_set_way(void __iomem *reg)
+{
+ int set;
+ int way;
+ unsigned long flags;
+
+ for (way = 0; way < l2x0_ways; way++) {
+ spin_lock_irqsave(&l2x0_lock, flags);
+ for (set = 0; set < l2x0_sets; set++)
+ writel_relaxed((way << 28) | (set << 5), reg);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
+}
+#endif
+
static void __l2x0_flush_all(void)
{
debug_writel(0x03);
@@ -132,6 +159,13 @@
{
unsigned long flags;
+#ifdef CONFIG_PL310_ERRATA_727915
+ if (is_pl310_rev(REV_PL310_R2P0)) {
+ l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
+ return;
+ }
+#endif
+
/* clean all ways */
spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
@@ -142,11 +176,20 @@
{
unsigned long flags;
+#ifdef CONFIG_PL310_ERRATA_727915
+ if (is_pl310_rev(REV_PL310_R2P0)) {
+ l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
+ return;
+ }
+#endif
+
/* clean all ways */
spin_lock_irqsave(&l2x0_lock, flags);
+ debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
+ debug_writel(0x00);
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -322,13 +365,11 @@
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux, bits;
- __u32 cache_id;
__u32 way_size = 0;
- int ways;
const char *type;
l2x0_base = base;
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+ l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
bits = readl_relaxed(l2x0_base + L2X0_CTRL);
bits &= ~0x01; /* clear bit 0 */
@@ -340,33 +381,34 @@
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
- ways = 16;
+ l2x0_ways = 16;
else
- ways = 8;
+ l2x0_ways = 8;
type = "L310";
break;
case L2X0_CACHE_ID_PART_L210:
- ways = (aux >> 13) & 0xf;
+ l2x0_ways = (aux >> 13) & 0xf;
type = "L210";
break;
default:
/* Assume unknown chips have 8 ways */
- ways = 8;
+ l2x0_ways = 8;
type = "L2x0 series";
break;
}
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
- l2x0_way_mask = (1 << ways) - 1;
+ l2x0_way_mask = (1 << l2x0_ways) - 1;
/*
* L2 cache Size = Way size * Number of ways
*/
way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
- way_size = 1 << (way_size + 3);
- l2x0_size = ways * way_size * SZ_1K;
+ way_size = SZ_1K << (way_size + 3);
+ l2x0_size = l2x0_ways * way_size;
+ l2x0_sets = way_size / CACHE_LINE_SIZE;
l2x0_inv_all();
@@ -375,7 +417,7 @@
bits |= 0x01; /* set bit 0 */
writel_relaxed(bits, l2x0_base + L2X0_CTRL);
- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L220:
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
@@ -407,7 +449,7 @@
mb();
printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
- ways, cache_id, aux, l2x0_size);
+ l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}
void l2x0_suspend(void)
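
Note: l2x0_for_each_set_way() packs the way index into bits [31:28] and the set index starting at bit 5 (log2 of the PL310's 32-byte line); a runnable sketch of that register encoding:

/* Runnable sketch of the set/way index encoding used by
 * l2x0_for_each_set_way() above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t l2x0_set_way(unsigned int way, unsigned int set)
{
	return (way << 28) | (set << 5);
}

int main(void)
{
	printf("way 3, set 7 -> 0x%08x\n", l2x0_set_way(3, 7));	/* 0x300000e0 */
	return 0;
}
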
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 0db2092..5cd1b05 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -174,6 +174,10 @@
dcache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
add r12, r12, r2
@@ -223,6 +227,10 @@
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@@ -247,6 +255,10 @@
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
@@ -270,6 +282,10 @@
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
@@ -288,6 +304,10 @@
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4fc0e3f..340c2d0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -322,6 +322,8 @@
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
+ else
+ __dma_free_buffer(page, size);
return addr;
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7cd02ff..5739a34 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -546,6 +546,13 @@
*/
bank_start = min(bank_start,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+ /*
+ * Align down here since the VM subsystem insists that the
+ * memmap entries are valid from the bank start aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
/*
* If we had a previous bank, and there is a space
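
Note: round_down() to MAX_ORDER_NR_PAGES (a power of two) is a single mask of the low bits; a runnable sketch, with an example MAX_ORDER_NR_PAGES since the real value depends on MAX_ORDER:

/* Runnable sketch of the power-of-two round_down() used above; the
 * MAX_ORDER_NR_PAGES value here is only an example. */
#include <stdio.h>

#define round_down(x, y)	((x) & ~((y) - 1))
#define MAX_ORDER_NR_PAGES	(1 << 10)	/* example: MAX_ORDER 11 */

int main(void)
{
	unsigned long bank_start = 0x12345;

	printf("0x%lx -> 0x%lx\n", bank_start,
	       round_down(bank_start, (unsigned long)MAX_ORDER_NR_PAGES));
	return 0;
}
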
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
index 82620af..ebbce33 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
@@ -66,7 +66,6 @@
#define MUX_MODE_MASK ((iomux_v3_cfg_t)0x1f << MUX_MODE_SHIFT)
#define MUX_PAD_CTRL_SHIFT 41
#define MUX_PAD_CTRL_MASK ((iomux_v3_cfg_t)0x1ffff << MUX_PAD_CTRL_SHIFT)
-#define NO_PAD_CTRL ((iomux_v3_cfg_t)1 << (MUX_PAD_CTRL_SHIFT + 16))
#define MUX_SEL_INPUT_SHIFT 58
#define MUX_SEL_INPUT_MASK ((iomux_v3_cfg_t)0xf << MUX_SEL_INPUT_SHIFT)
@@ -85,6 +84,7 @@
* Use to set PAD control
*/
+#define NO_PAD_CTRL (1 << 16)
#define PAD_CTL_DVS (1 << 13)
#define PAD_CTL_HYS (1 << 8)
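
Note: with NO_PAD_CTRL moved down beside the other PAD_CTL_* values, it becomes an ordinary bit (bit 16) of the 17-bit pad-control field that gets shifted into the 64-bit config word at MUX_PAD_CTRL_SHIFT. A runnable sketch of that packing, with the macros redefined locally:

/* Runnable sketch of how a pad-control value lands in the 64-bit
 * iomux_v3_cfg_t word; NO_PAD_CTRL is now bit 16 of the field. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t iomux_v3_cfg_t;

#define MUX_PAD_CTRL_SHIFT	41
#define MUX_PAD_CTRL_MASK	((iomux_v3_cfg_t)0x1ffff << MUX_PAD_CTRL_SHIFT)
#define MUX_PAD_CTRL(x)		((iomux_v3_cfg_t)(x) << MUX_PAD_CTRL_SHIFT)
#define NO_PAD_CTRL		(1 << 16)

int main(void)
{
	iomux_v3_cfg_t cfg = MUX_PAD_CTRL(NO_PAD_CTRL);

	printf("pad field = 0x%05x\n",
	       (unsigned int)((cfg & MUX_PAD_CTRL_MASK) >> MUX_PAD_CTRL_SHIFT));
	return 0;
}
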
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 9897dcf..404538a 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -77,15 +77,12 @@
bne look_for_VFP_exceptions @ VFP is already enabled
DBGSTR1 "enable %x", r10
- ldr r3, last_VFP_context_address
+ ldr r3, vfp_current_hw_state_address
orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
- ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer
+ ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
- cmp r4, r10
- beq check_for_exception @ we are returning to the same
- @ process, so the registers are
- @ still there. In this case, we do
- @ not want to drop a pending exception.
+ cmp r4, r10 @ this thread owns the hw context?
+ beq vfp_hw_state_valid
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@@ -116,7 +113,7 @@
no_old_VFP_process:
DBGSTR1 "load state %p", r10
- str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer
+ str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
@ Load the saved state back into the VFP
VFPFLDMIA r10, r5 @ reload the working registers while
@ FPEXC is in a safe state
@@ -132,7 +129,8 @@
#endif
VFPFMXR FPSCR, r5 @ restore status
-check_for_exception:
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
tst r1, #FPEXC_EX
bne process_exception @ might as well handle the pending
@ exception before retrying branch
@@ -207,8 +205,8 @@
ENDPROC(vfp_save_state)
.align
-last_VFP_context_address:
- .word last_VFP_context
+vfp_current_hw_state_address:
+ .word vfp_current_hw_state
.macro tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a8a8925..e7fc95c 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,13 @@
void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
+
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
* Dual-use variable.
@@ -57,12 +63,12 @@
/*
* Disable VFP to ensure we initialize it first. We must ensure
- * that the modification of last_VFP_context[] and hardware disable
+ * that the modification of vfp_current_hw_state[] and hardware disable
* are done for the same CPU and without preemption.
*/
cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
}
@@ -73,8 +79,8 @@
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
put_cpu();
}
@@ -129,9 +135,9 @@
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
- if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
- vfp_save_state(last_VFP_context[cpu], fpexc);
- last_VFP_context[cpu]->hard.cpu = cpu;
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+ vfp_current_hw_state[cpu]->hard.cpu = cpu;
}
/*
* Thread migration, just force the reloading of the
@@ -139,7 +145,7 @@
* contain stale data.
*/
if (thread->vfpstate.hard.cpu != cpu)
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
#endif
/*
@@ -413,22 +419,22 @@
#ifdef CONFIG_SMP
/* On SMP, if VFP is enabled, save the old state */
- if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
- last_VFP_context[cpu]->hard.cpu = cpu;
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+ vfp_current_hw_state[cpu]->hard.cpu = cpu;
#else
/* If there is a VFP context we must save it. */
- if (last_VFP_context[cpu]) {
+ if (vfp_current_hw_state[cpu]) {
/* Enable VFP so we can save the old state. */
fmxr(FPEXC, fpexc | FPEXC_EN);
isb();
#endif
- vfp_save_state(last_VFP_context[cpu], fpexc);
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
saved = 1;
}
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
local_irq_restore(flags);
@@ -481,7 +487,7 @@
* If the thread we're interested in is the current owner of the
* hardware VFP state, then we need to save its state.
*/
- if (last_VFP_context[cpu] == &thread->vfpstate) {
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
/*
@@ -503,7 +509,7 @@
* If the thread we're interested in is the current owner of the
* hardware VFP state, then we need to save its state.
*/
- if (last_VFP_context[cpu] == &thread->vfpstate) {
+ if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
u32 fpexc = fmrx(FPEXC);
fmxr(FPEXC, fpexc & ~FPEXC_EN);
@@ -512,7 +518,7 @@
* Set the context to NULL to force a reload the next time
* the thread uses the VFP.
*/
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
}
#ifdef CONFIG_SMP
@@ -544,7 +550,7 @@
{
if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
unsigned int cpu = (long)hcpu;
- last_VFP_context[cpu] = NULL;
+ vfp_current_hw_state[cpu] = NULL;
} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
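
Note: vfp_current_hw_state[] implements lazy FP context handling: the hardware registers are reloaded only when the running thread is not the recorded owner, and a slot is set to NULL to force a reload. A user-space analog of the ownership test, with all names illustrative:

/* User-space analog of the vfp_current_hw_state[] ownership test; the
 * "hardware" context reloads only on an owner mismatch. */
#include <stdio.h>

struct vfp_state { int id; };

static struct vfp_state *hw_owner;	/* one slot per CPU in the kernel */

static void switch_to(struct vfp_state *thread_state)
{
	if (hw_owner == thread_state) {
		printf("thread %d: hw context still valid, no reload\n",
		       thread_state->id);
		return;
	}
	printf("thread %d: reloading hw context\n", thread_state->id);
	hw_owner = thread_state;
}

int main(void)
{
	struct vfp_state a = { 1 }, b = { 2 };

	switch_to(&a);	/* reload */
	switch_to(&a);	/* owner matches: skip */
	switch_to(&b);	/* reload */
	return 0;
}
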