Merge remote-tracking branch 'common/android-3.0' into msm-3.0

* common/android-3.0: (570 commits)
  misc: remove kernel debugger core
  ARM: common: fiq_debugger: dump sysrq directly to console if enabled
  ARM: common: fiq_debugger: add irq context debug functions
  net: wireless: bcmdhd: Call init_ioctl() only if it was started properly for WEXT
  net: wireless: bcmdhd: Call init_ioctl() only if it was started properly
  net: wireless: bcmdhd: Fix possible memory leak in escan/iscan
  cpufreq: interactive governor: default 20ms timer
  cpufreq: interactive governor: go to intermediate hi speed before max
  cpufreq: interactive governor: scale to max only if at min speed
  cpufreq: interactive governor: apply intermediate load on current speed
  ARM: idle: update idle ticks before calling idle end notifier
  input: gpio_input: don't print debounce message unless flag is set
  net: wireless: bcm4329: Skip dhd_bus_stop() if bus is already down
  net: wireless: bcmdhd: Skip dhd_bus_stop() if bus is already down
  net: wireless: bcmdhd: Improve suspend/resume processing
  net: wireless: bcmdhd: Check if FW is Ok for internal FW call
  tcp: Don't nuke connections for the wrong protocol
  ARM: common: fiq_debugger: make uart irq be no_suspend
  net: wireless: Skip connect warning for CONFIG_CFG80211_ALLOW_RECONNECT
  mm: avoid livelock on !__GFP_FS allocations
  ...

Conflicts:
	arch/arm/mm/cache-l2x0.c
	arch/arm/vfp/vfpmodule.c
	drivers/mmc/core/host.c
	kernel/power/wakelock.c
	net/bluetooth/hci_event.c

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a8a8925..e7fc95c 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,13 @@
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
+
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -57,12 +63,12 @@
 
 	/*
 	 * Disable VFP to ensure we initialize it first.  We must ensure
-	 * that the modification of last_VFP_context[] and hardware disable
+	 * that the modification of vfp_current_hw_state[] and hardware disable
 	 * are done for the same CPU and without preemption.
 	 */
 	cpu = get_cpu();
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 	put_cpu();
 }
@@ -73,8 +79,8 @@
 	union vfp_state *vfp = &thread->vfpstate;
 	unsigned int cpu = get_cpu();
 
-	if (last_VFP_context[cpu] == vfp)
-		last_VFP_context[cpu] = NULL;
+	if (vfp_current_hw_state[cpu] == vfp)
+		vfp_current_hw_state[cpu] = NULL;
 	put_cpu();
 }
 
@@ -129,9 +135,9 @@
 		 * case the thread migrates to a different CPU. The
 		 * restoring is done lazily.
 		 */
-		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-			vfp_save_state(last_VFP_context[cpu], fpexc);
-			last_VFP_context[cpu]->hard.cpu = cpu;
+		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+			vfp_current_hw_state[cpu]->hard.cpu = cpu;
 		}
 		/*
 		 * Thread migration, just force the reloading of the
@@ -139,7 +145,7 @@
 		 * contain stale data.
 		 */
 		if (thread->vfpstate.hard.cpu != cpu)
-			last_VFP_context[cpu] = NULL;
+			vfp_current_hw_state[cpu] = NULL;
 #endif
 
 		/*
@@ -413,22 +419,22 @@
 
 #ifdef CONFIG_SMP
 	/* On SMP, if VFP is enabled, save the old state */
-	if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-		last_VFP_context[cpu]->hard.cpu = cpu;
+	if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
+		vfp_current_hw_state[cpu]->hard.cpu = cpu;
 #else
 	/* If there is a VFP context we must save it. */
-	if (last_VFP_context[cpu]) {
+	if (vfp_current_hw_state[cpu]) {
 		/* Enable VFP so we can save the old state. */
 		fmxr(FPEXC, fpexc | FPEXC_EN);
 		isb();
 #endif
-		vfp_save_state(last_VFP_context[cpu], fpexc);
+		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 
 		/* disable, just in case */
 		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 		saved = 1;
 	}
-	last_VFP_context[cpu] = NULL;
+	vfp_current_hw_state[cpu] = NULL;
 
 	local_irq_restore(flags);
 
@@ -481,7 +487,7 @@
 	 * If the thread we're interested in is the current owner of the
 	 * hardware VFP state, then we need to save its state.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
 		u32 fpexc = fmrx(FPEXC);
 
 		/*
@@ -503,7 +509,7 @@
 	 * If the thread we're interested in is the current owner of the
 	 * hardware VFP state, then we need to save its state.
 	 */
-	if (last_VFP_context[cpu] == &thread->vfpstate) {
+	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
 		u32 fpexc = fmrx(FPEXC);
 
 		fmxr(FPEXC, fpexc & ~FPEXC_EN);
@@ -512,7 +518,7 @@
 		 * Set the context to NULL to force a reload the next time
 		 * the thread uses the VFP.
 		 */
-		last_VFP_context[cpu] = NULL;
+		vfp_current_hw_state[cpu] = NULL;
 	}
 
 #ifdef CONFIG_SMP
@@ -544,7 +550,7 @@
 {
 	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
 		unsigned int cpu = (long)hcpu;
-		last_VFP_context[cpu] = NULL;
+		vfp_current_hw_state[cpu] = NULL;
 	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		vfp_enable(NULL);
 	return NOTIFY_OK;