Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4

AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.

* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
  PRNG: Device tree entry for qrng device.
  vidc:1080p: Set video core timeout value for Thumbnail mode
  msm: sps: improve the debugging support in SPS driver
  board-8064 msm: Overlap secure and non secure video firmware heaps.
  msm: clock: Add handoff ops for 7x30 and copper XO clocks
  msm_fb: display: Wait for external vsync before DTV IOMMU unmap
  msm: Fix circular dependency in debug UART settings
  msm: gdsc: Add GDSC regulator driver for msm-copper
  defconfig: Enable Mobicore Driver.
  mobicore: Add mobicore driver.
  mobicore: rename variable to lower case.
  mobicore: rename folder.
  mobicore: add makefiles
  mobicore: initial import of kernel driver
  ASoC: msm: Add SLIMBUS_2_RX CPU DAI
  board-8064-gpio: Update FUNC for EPM SPI CS
  msm_fb: display: Remove chicken bit config during video playback
  mmc: msm_sdcc: enable the sanitize capability
  msm-fb: display: lm2 writeback support on mpq platforms
  msm_fb: display: Disable LVDS phy & pll during panel off
  ...

Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c8a7d8..cb245ee 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -588,6 +588,9 @@
 config CPU_TLB_V7
 	bool
 
+config EMULATE_DOMAIN_MANAGER_V7
+	bool
+
 config VERIFY_PERMISSION_FAULT
 	bool
 endif
@@ -756,6 +759,19 @@
 	  If your SoC is configured to have a different size, define the value
 	  here with proper conditions.
 
+config CPU_CACHE_ERR_REPORT
+	bool "Report errors in the L1 and L2 caches"
+	depends on ARCH_MSM_SCORPION
+	default n
+	help
+	  The Scorpion processor supports reporting L2 errors, L1 icache parity
+	  errors, and L1 dcache parity errors as imprecise external aborts. If
+	  this option is not enabled, these errors will go unreported and data
+	  corruption will occur.
+
+	  Say Y here to have errors in the L1 and L2 caches reported as
+	  imprecise data aborts.
+
 config CPU_DCACHE_WRITETHROUGH
 	bool "Force write through D-cache"
 	depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_FA526) && !CPU_DCACHE_DISABLE
@@ -918,3 +934,30 @@
 	help
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
+
+config VCM_MM
+	bool
+
+config VCM
+	bool "Virtual Contiguous Memory (VCM) Layer"
+	depends on MMU
+	select GENERIC_ALLOCATOR
+	select VCM_MM
+	default n
+	help
+	  Virtual Contiguous Memory layer. This is the layer that is intended to
+	  replace PMEM.
+
+	  If you don't know what this is, say N here.
+
+config STRICT_MEMORY_RWX
+	bool "restrict kernel memory permissions as much as possible"
+	default n
+	help
+	  If this is set, kernel text will be made RX, kernel data and stack
+	  RW, rodata R (otherwise all of the kernel 1-to-1 mapping is
+	  made RWX).
+	  The tradeoff is that several sections are padded to
+	  1M boundaries (because their permissions are different and
+	  splitting the 1M pages into 4K ones causes TLB performance
+	  problems), wasting memory.
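A rough, self-contained sketch of the padding cost described in the help text above; SECTION_SZ is the ARM 1M section, while the text/rodata sizes are invented for illustration and are not taken from this patch.

/*
 * Illustration only: how much memory 1M section padding can waste when
 * kernel text, rodata and data need different permissions.
 */
#include <stdio.h>

#define SECTION_SZ (1UL << 20)	/* ARM first-level section: 1 MiB */

static unsigned long pad_to_section(unsigned long sz)
{
	return (sz + SECTION_SZ - 1) & ~(SECTION_SZ - 1);	/* round up */
}

int main(void)
{
	unsigned long text   = 7 * SECTION_SZ + 300 * 1024;	/* assumed ~7.3M of text */
	unsigned long rodata = 2 * SECTION_SZ + 100 * 1024;	/* assumed ~2.1M of rodata */
	unsigned long waste  = (pad_to_section(text) - text) +
			       (pad_to_section(rodata) - rodata);

	printf("padding waste: %lu KiB\n", waste / 1024);
	return 0;
}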
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 37da2cc..1c415af 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -93,6 +93,7 @@
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
 obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
+obj-$(CONFIG_EMULATE_DOMAIN_MANAGER_V7) += emulate_domain_manager-v7.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
 AFLAGS_proc-v7.o	:=-Wa,-march=armv7-a
@@ -101,3 +102,5 @@
 obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
 obj-$(CONFIG_CACHE_XSC3L2)	+= cache-xsc3l2.o
 obj-$(CONFIG_CACHE_TAUROS2)	+= cache-tauros2.o
+obj-$(CONFIG_VCM)		+= vcm.o vcm_alloc.o
+obj-$(CONFIG_VCM_MM)		+= vcm_mm.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index eaa6847..cb9fc76 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -2,6 +2,7 @@
  * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
  *
  * Copyright (C) 2007 ARM Limited
+ * Copyright (c) 2009, 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -36,6 +37,7 @@
 static unsigned int l2x0_sets;
 static unsigned int l2x0_ways;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
+static void pl310_save(void);
 
 static inline bool is_pl310_rev(int rev)
 {
@@ -131,7 +133,7 @@
 }
 #endif
 
-static void l2x0_cache_sync(void)
+void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
@@ -418,9 +420,9 @@
 		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
-	outer_cache.inv_range = l2x0_inv_range;
-	outer_cache.clean_range = l2x0_clean_range;
-	outer_cache.flush_range = l2x0_flush_range;
+		outer_cache.inv_range = l2x0_inv_range;
+		outer_cache.clean_range = l2x0_clean_range;
+		outer_cache.flush_range = l2x0_flush_range;
 	outer_cache.sync = l2x0_cache_sync;
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
@@ -429,6 +431,9 @@
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
 			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
+
+	/* Save the L2X0 contents, as they are not modified elsewhere */
+	pl310_save();
 }
 
 #ifdef CONFIG_OF
@@ -499,8 +504,9 @@
 			       l2x0_base + L2X0_ADDR_FILTER_START);
 	}
 }
+#endif
 
-static void __init pl310_save(void)
+static void pl310_save(void)
 {
 	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
 		L2X0_CACHE_ID_RTL_MASK;
@@ -574,6 +580,7 @@
 	l2x0_resume();
 }
 
+#ifdef CONFIG_OF
 static const struct l2x0_of_data pl310_data = {
 	pl310_of_setup,
 	pl310_save,
@@ -629,3 +636,15 @@
 	return 0;
 }
 #endif
+
+void l2cc_suspend(void)
+{
+	l2x0_disable();
+	dmb();
+}
+
+void l2cc_resume(void)
+{
+	pl310_resume();
+	dmb();
+}
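The l2cc_suspend()/l2cc_resume() pair added above has no caller in this file; a hedged sketch of how a platform low-power path might use it (the wrapper function is hypothetical, not part of this patch):

/*
 * Hypothetical caller: bracket a low-power state with the L2 cache
 * controller hooks added above.
 */
extern void l2cc_suspend(void);
extern void l2cc_resume(void);

static void example_power_collapse(void)
{
	l2cc_suspend();		/* disable the PL310 and drain outstanding writes */

	/* ... enter the SoC low-power state here ... */

	l2cc_resume();		/* restore the saved PL310 state and re-enable it */
}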
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb36..847ea19 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,8 +14,11 @@
 #include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
+#include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
 
+#include <mach/msm_rtb.h>
+
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
@@ -37,6 +40,67 @@
 	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
 #endif
 
+static void write_contextidr(u32 contextidr)
+{
+	uncached_logk(LOGK_CTXID, (void *)contextidr);
+	asm("mcr	p15, 0, %0, c13, c0, 1" : : "r" (contextidr));
+	isb();
+}
+
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static u32 read_contextidr(void)
+{
+	u32 contextidr;
+	asm("mrc	p15, 0, %0, c13, c0, 1" : "=r" (contextidr));
+	return contextidr;
+}
+
+static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
+			       void *t)
+{
+	unsigned long flags;
+	u32 contextidr;
+	pid_t pid;
+	struct thread_info *thread = t;
+
+	if (cmd != THREAD_NOTIFY_SWITCH)
+		return NOTIFY_DONE;
+
+	pid = task_pid_nr(thread->task);
+	local_irq_save(flags);
+	contextidr = read_contextidr();
+	contextidr &= ~ASID_MASK;
+	contextidr |= pid << ASID_BITS;
+	write_contextidr(contextidr);
+	local_irq_restore(flags);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block contextidr_notifier_block = {
+	.notifier_call = contextidr_notifier,
+};
+
+static int __init contextidr_notifier_init(void)
+{
+	return thread_register_notifier(&contextidr_notifier_block);
+}
+arch_initcall(contextidr_notifier_init);
+
+static void set_asid(unsigned int asid)
+{
+	u32 contextidr = read_contextidr();
+	contextidr &= ASID_MASK;
+	contextidr |= asid & ~ASID_MASK;
+	write_contextidr(contextidr);
+}
+#else
+static void set_asid(unsigned int asid)
+{
+	write_contextidr(asid);
+}
+#endif
+
 /*
  * We fork()ed a process, and we need a new context for the child
  * to run in.  We reserve version 0 for initial tasks so we will
@@ -52,8 +116,7 @@
 static void flush_context(void)
 {
 	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	set_asid(0);
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -114,8 +177,7 @@
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	set_asid(mm->context.id);
 }
 
 #else
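For reference, the CONFIG_PID_IN_CONTEXTIDR code above keeps the 8-bit ASID in CONTEXTIDR bits [7:0] and the task PID in bits [31:8]; a small illustrative helper (not part of the patch) showing that packing:

/* Illustrative only: pack a PID and an ASID the way contextidr_notifier()
 * and set_asid() above build CONTEXTIDR. */
#include <stdint.h>

static inline uint32_t pack_contextidr(uint32_t pid, uint32_t asid)
{
	return (pid << 8) | (asid & 0xffu);	/* PID in [31:8], ASID in [7:0] */
}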
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4..702408c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -128,7 +128,7 @@
  */
 static pte_t **consistent_pte;
 
-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
+#define DEFAULT_CONSISTENT_DMA_SIZE (7*SZ_2M)
 
 unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
@@ -467,18 +467,22 @@
 void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+#ifdef CONFIG_OUTER_CACHE
 	unsigned long paddr;
 
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+#endif
 
 	dmac_map_area(kaddr, size, dir);
 
+#ifdef CONFIG_OUTER_CACHE
 	paddr = __pa(kaddr);
 	if (dir == DMA_FROM_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 	} else {
 		outer_clean_range(paddr, paddr + size);
 	}
+#endif
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_single_cpu_to_dev);
@@ -486,6 +490,7 @@
 void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+#ifdef CONFIG_OUTER_CACHE
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
 	/* FIXME: non-speculating: not required */
@@ -494,7 +499,7 @@
 		unsigned long paddr = __pa(kaddr);
 		outer_inv_range(paddr, paddr + size);
 	}
-
+#endif
 	dmac_unmap_area(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
diff --git a/arch/arm/mm/emulate_domain_manager-v7.c b/arch/arm/mm/emulate_domain_manager-v7.c
new file mode 100644
index 0000000..3797e21
--- /dev/null
+++ b/arch/arm/mm/emulate_domain_manager-v7.c
@@ -0,0 +1,345 @@
+/*
+ * Basic implementation of a SW emulation of the domain manager feature in
+ * ARM architecture.  Assumes single processor ARMv7 chipset.
+ *
+ * Requires hooks to be alerted to any runtime changes of dacr or MMU context.
+ *
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <asm/domain.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <linux/module.h>
+
+#define DOMAIN_MANAGER_BITS (0xAAAAAAAA)
+
+#define DFSR_DOMAIN(dfsr) ((dfsr >> 4) & (16-1))
+
+#define FSR_PERMISSION_FAULT(fsr) ((fsr & 0x40D) == 0x00D)
+#define FSR_PERMISSION_SECT(fsr) ((fsr & 0x40F) == 0x00D)
+
+/* ARMv7 MMU HW Macros.  Not conveniently defined elsewhere */
+#define MMU_TTB_ADDRESS(x)   ((u32 *)(((u32)(x)) & ~((1 << 14) - 1)))
+#define MMU_PMD_INDEX(addr) (((u32)addr) >> SECTION_SHIFT)
+#define MMU_TABLE_ADDRESS(x) ((u32 *)((x) & ~((1 << 10) - 1)))
+#define MMU_TABLE_INDEX(x) ((((u32)x) >> 12) & (256 - 1))
+
+/* Convenience Macros */
+#define PMD_IS_VALID(x) (PMD_IS_TABLE(x) || PMD_IS_SECTION(x))
+#define PMD_IS_TABLE(x) ((x & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+#define PMD_IS_SECTION(x) ((x & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#define PMD_IS_SUPERSECTION(x) \
+	(PMD_IS_SECTION(x) && ((x & PMD_SECT_SUPER) == PMD_SECT_SUPER))
+
+#define PMD_GET_DOMAIN(x)					\
+	(PMD_IS_TABLE(x) ||					\
+	(PMD_IS_SECTION(x) && !PMD_IS_SUPERSECTION(x)) ?	\
+		 0 : (x >> 5) & (16-1))
+
+#define PTE_IS_LARGE(x) ((x & PTE_TYPE_MASK) == PTE_TYPE_LARGE)
+
+
+/* Only DOMAIN_MMU_ENTRIES will be granted access simultaneously */
+#define DOMAIN_MMU_ENTRIES (8)
+
+#define LRU_INC(lru) ((lru + 1) >= DOMAIN_MMU_ENTRIES ? 0 : lru + 1)
+
+
+static DEFINE_SPINLOCK(edm_lock);
+
+static u32 edm_manager_bits;
+
+struct domain_entry_save {
+	u32 *mmu_entry;
+	u32 *addr;
+	u32 value;
+	u16 sect;
+	u16 size;
+};
+
+static struct domain_entry_save edm_save[DOMAIN_MMU_ENTRIES];
+
+static u32 edm_lru;
+
+
+/*
+ *  Return virtual address of pmd (level 1) entry for addr
+ *
+ *  This routine walks the ARMv7 page tables in HW.
+ */
+static inline u32 *__get_pmd_v7(u32 *addr)
+{
+	u32 *ttb;
+
+	__asm__ __volatile__(
+		"mrc	p15, 0, %0, c2, c0, 0	@ ttbr0\n\t"
+		: "=r" (ttb)
+		:
+	);
+
+	return __va(MMU_TTB_ADDRESS(ttb) + MMU_PMD_INDEX(addr));
+}
+
+/*
+ *  Return virtual address of pte (level 2) entry for addr
+ *
+ *  This routine walks the ARMv7 page tables in HW.
+ */
+static inline u32 *__get_pte_v7(u32 *addr)
+{
+	u32 *pmd = __get_pmd_v7(addr);
+	u32 *table_pa = pmd && PMD_IS_TABLE(*pmd) ?
+		MMU_TABLE_ADDRESS(*pmd) : 0;
+	u32 *entry = table_pa ? __va(table_pa[MMU_TABLE_INDEX(addr)]) : 0;
+
+	return entry;
+}
+
+/*
+ *  Invalidate the TLB for a given address for the current context
+ *
+ *  After manipulating access permissions, the TLB must be invalidated so
+ *  that the changes are observed
+ */
+static inline void __tlb_invalidate(u32 *addr)
+{
+	__asm__ __volatile__(
+		"mrc	p15, 0, %%r2, c13, c0, 1	@ contextidr\n\t"
+		"and %%r2, %%r2, #0xff			@ asid\n\t"
+		"mov %%r3, %0, lsr #12			@ mva[31:12]\n\t"
+		"orr %%r2, %%r2, %%r3, lsl #12		@ tlb mva and asid\n\t"
+		"mcr	p15, 0, %%r2, c8, c7, 1		@ utlbimva\n\t"
+		"isb"
+		:
+		: "r" (addr)
+		: "r2", "r3"
+	);
+}
+
+/*
+ *  Set HW MMU entry and do required synchronization operations.
+ */
+static inline void __set_entry(u32 *entry, u32 *addr, u32 value, int size)
+{
+	int i;
+
+	if (!entry)
+		return;
+
+	entry = (u32 *)((u32) entry & ~(size * sizeof(u32) - 1));
+
+	for (i = 0; i < size; i++)
+		entry[i] = value;
+
+	__asm__ __volatile__(
+		"mcr	p15, 0, %0, c7, c10, 1		@ flush entry\n\t"
+		"dsb\n\t"
+		"isb\n\t"
+		:
+		: "r" (entry)
+	);
+	__tlb_invalidate(addr);
+}
+
+/*
+ *  Return the number of duplicate entries associated with entry value.
+ *  Supersections and Large page table entries are replicated 16x.
+ */
+static inline int __entry_size(int sect, int value)
+{
+	u32 size;
+
+	if (sect)
+		size = PMD_IS_SUPERSECTION(value) ? 16 : 1;
+	else
+		size = PTE_IS_LARGE(value) ? 16 : 1;
+
+	return size;
+}
+
+/*
+ *  Change entry permissions to emulate domain manager access
+ */
+static inline int __manager_perm(int sect, int value)
+{
+	u32 edm_value;
+
+	if (sect) {
+		edm_value = (value & ~(PMD_SECT_APX | PMD_SECT_XN)) |
+		(PMD_SECT_AP_READ | PMD_SECT_AP_WRITE);
+	} else {
+		edm_value = (value & ~(PTE_EXT_APX | PTE_EXT_XN)) |
+			(PTE_EXT_AP1 | PTE_EXT_AP0);
+	}
+	return edm_value;
+}
+
+/*
+ *  Restore original HW MMU entry.  Cancels domain manager access
+ */
+static inline void __restore_entry(int index)
+{
+	struct domain_entry_save *entry = &edm_save[index];
+	u32 edm_value;
+
+	if (!entry->mmu_entry)
+		return;
+
+	edm_value = __manager_perm(entry->sect, entry->value);
+
+	if (*entry->mmu_entry == edm_value)
+		__set_entry(entry->mmu_entry, entry->addr,
+			entry->value, entry->size);
+
+	entry->mmu_entry = 0;
+}
+
+/*
+ *  Modify HW MMU entry to grant domain manager access for a given MMU entry.
+ *  This adds full read, write, and exec access permissions.
+ */
+static inline void __set_manager(int sect, u32 *addr)
+{
+	u32 *entry = sect ? __get_pmd_v7(addr) : __get_pte_v7(addr);
+	u32 value;
+	u32 edm_value;
+	u16 size;
+
+	if (!entry)
+		return;
+
+	value = *entry;
+
+	size = __entry_size(sect, value);
+	edm_value = __manager_perm(sect, value);
+
+	__set_entry(entry, addr, edm_value, size);
+
+	__restore_entry(edm_lru);
+
+	edm_save[edm_lru].mmu_entry = entry;
+	edm_save[edm_lru].addr = addr;
+	edm_save[edm_lru].value = value;
+	edm_save[edm_lru].sect = sect;
+	edm_save[edm_lru].size = size;
+
+	edm_lru = LRU_INC(edm_lru);
+}
+
+/*
+ *  Restore all original HW MMU entries saved in edm_save[],
+ *  cancelling any emulated domain manager access that is
+ *  currently granted.
+ */
+static inline void __restore(void)
+{
+	if (unlikely(edm_manager_bits)) {
+		u32 i;
+
+		for (i = 0; i < DOMAIN_MMU_ENTRIES; i++)
+			__restore_entry(i);
+	}
+}
+
+/*
+ * Common abort handler code
+ *
+ * If the domain manager bit were really set in HW, this permission fault
+ * would not have happened.  Open the access permissions to emulate it, save
+ * the original settings to restore later, and return 1 to hide the fault.
+ */
+static int __emulate_domain_manager_abort(u32 fsr, u32 far, int dabort)
+{
+	if (unlikely(FSR_PERMISSION_FAULT(fsr) && edm_manager_bits)) {
+		int domain = dabort ? DFSR_DOMAIN(fsr) : PMD_GET_DOMAIN(far);
+		if (edm_manager_bits & domain_val(domain, DOMAIN_MANAGER)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&edm_lock, flags);
+
+			__set_manager(FSR_PERMISSION_SECT(fsr), (u32 *) far);
+
+			spin_unlock_irqrestore(&edm_lock, flags);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Change domain setting.
+ *
+ * Lock and restore original contents.  Extract and save manager bits.  Set
+ * DACR, excluding manager bits.
+ */
+void emulate_domain_manager_set(u32 domain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&edm_lock, flags);
+
+	if (edm_manager_bits != (domain & DOMAIN_MANAGER_BITS)) {
+		__restore();
+		edm_manager_bits = domain & DOMAIN_MANAGER_BITS;
+	}
+
+	__asm__ __volatile__(
+		"mcr	p15, 0, %0, c3, c0, 0	@ set domain\n\t"
+		"isb"
+		:
+		: "r" (domain & ~DOMAIN_MANAGER_BITS)
+	);
+
+	spin_unlock_irqrestore(&edm_lock, flags);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_set);
+
+/*
+ * Switch thread context.  Restore original contents.
+ */
+void emulate_domain_manager_switch_mm(unsigned long pgd_phys,
+	struct mm_struct *mm,
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *))
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&edm_lock, flags);
+
+	__restore();
+
+	/* Call underlying kernel handler */
+	switch_mm(pgd_phys, mm);
+
+	spin_unlock_irqrestore(&edm_lock, flags);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_switch_mm);
+
+/*
+ * Kernel data_abort hook
+ */
+int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar)
+{
+	return __emulate_domain_manager_abort(dfsr, dfar, 1);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_data_abort);
+
+/*
+ * Kernel prefetch_abort hook
+ */
+int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar)
+{
+	return __emulate_domain_manager_abort(ifsr, ifar, 0);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_prefetch_abort);
+
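A hedged sketch of how a client might drive the emulation above: grant manager rights for one domain with emulate_domain_manager_set() and later drop them. The wrapper function is hypothetical; domain_val(), DOMAIN_USER/DOMAIN_KERNEL and DOMAIN_MANAGER/DOMAIN_CLIENT are the usual asm/domain.h definitions.

/*
 * Hypothetical caller, not part of this patch. While the user domain is set
 * to manager, permission faults in it are fixed up by the abort hooks
 * (wired into fault.c below) instead of being delivered.
 */
#include <asm/domain.h>

static void example_with_domain_manager(void)
{
	/* user domain as manager, kernel domain stays a client */
	emulate_domain_manager_set(domain_val(DOMAIN_USER, DOMAIN_MANAGER) |
				   domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT));

	/* ... accesses that rely on manager permissions ... */

	/* back to plain client access for both domains */
	emulate_domain_manager_set(domain_val(DOMAIN_USER, DOMAIN_CLIENT) |
				   domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT));
}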
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5bb4835..ed03b33 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,15 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/cputype.h>
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+#include <asm/io.h>
+#include <mach/msm_iomap.h>
+#endif
+
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+#include <asm/domain.h>
+#endif /* CONFIG_EMULATE_DOMAIN_MANAGER_V7 */
 
 #include "fault.h"
 
@@ -509,6 +518,49 @@
 	return 1;
 }
 
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+#define __str(x) #x
+#define MRC(x, v1, v2, v4, v5, v6) do {					\
+	unsigned int __##x;						\
+	asm("mrc " __str(v1) ", " __str(v2) ", %0, " __str(v4) ", "	\
+		__str(v5) ", " __str(v6) "\n" \
+		: "=r" (__##x));					\
+	pr_info("%s: %s = 0x%.8x\n", __func__, #x, __##x);		\
+} while (0)
+
+#define MSM_TCSR_SPARE2 (MSM_TCSR_BASE + 0x60)
+
+#endif
+
+int
+do_imprecise_ext(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+	MRC(ADFSR,    p15, 0,  c5, c1, 0);
+	MRC(DFSR,     p15, 0,  c5, c0, 0);
+	MRC(ACTLR,    p15, 0,  c1, c0, 1);
+	MRC(EFSR,     p15, 7, c15, c0, 1);
+	MRC(L2SR,     p15, 3, c15, c1, 0);
+	MRC(L2CR0,    p15, 3, c15, c0, 1);
+	MRC(L2CPUESR, p15, 3, c15, c1, 1);
+	MRC(L2CPUCR,  p15, 3, c15, c0, 2);
+	MRC(SPESR,    p15, 1,  c9, c7, 0);
+	MRC(SPCR,     p15, 0,  c9, c7, 0);
+	MRC(DMACHSR,  p15, 1, c11, c0, 0);
+	MRC(DMACHESR, p15, 1, c11, c0, 1);
+	MRC(DMACHCR,  p15, 0, c11, c0, 2);
+
+	/* clear out EFSR and ADFSR after fault */
+	asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t"
+		      "mcr p15, 0, %0, c5, c1, 0"
+		      : : "r" (0));
+#endif
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+	pr_info("%s: TCSR_SPARE2 = 0x%.8x\n", __func__, readl(MSM_TCSR_SPARE2));
+#endif
+	return 1;
+}
+
 struct fsr_info {
 	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 	int	sig;
@@ -536,6 +588,75 @@
 	fsr_info[nr].name = name;
 }
 
+#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER
+static int krait_tbb_fixup(unsigned int fsr, struct pt_regs *regs)
+{
+	int base_cond, cond = 0;
+	unsigned int p1, cpsr_z, cpsr_c, cpsr_n, cpsr_v;
+
+	if ((read_cpuid_id() & 0xFFFFFFFC) != 0x510F04D0)
+		return 0;
+
+	if (!thumb_mode(regs))
+		return 0;
+
+	/* If ITSTATE is 0, return quickly */
+	if ((regs->ARM_cpsr & PSR_IT_MASK) == 0)
+		return 0;
+
+	cpsr_n = (regs->ARM_cpsr & PSR_N_BIT) ? 1 : 0;
+	cpsr_z = (regs->ARM_cpsr & PSR_Z_BIT) ? 1 : 0;
+	cpsr_c = (regs->ARM_cpsr & PSR_C_BIT) ? 1 : 0;
+	cpsr_v = (regs->ARM_cpsr & PSR_V_BIT) ? 1 : 0;
+
+	p1 = (regs->ARM_cpsr & BIT(12)) ? 1 : 0;
+
+	base_cond = (regs->ARM_cpsr >> 13) & 0x07;
+
+	switch (base_cond) {
+	case 0x0:	/* equal */
+		cond = cpsr_z;
+		break;
+
+	case 0x1:	/* carry set */
+		cond = cpsr_c;
+		break;
+
+	case 0x2:	/* minus / negative */
+		cond = cpsr_n;
+		break;
+
+	case 0x3:	/* overflow */
+		cond = cpsr_v;
+		break;
+
+	case 0x4:	/* unsigned higher */
+		cond = (cpsr_c == 1) && (cpsr_z == 0);
+		break;
+
+	case 0x5:	/* signed greater / equal */
+		cond = (cpsr_n == cpsr_v);
+		break;
+
+	case 0x6:	/* signed greater */
+		cond = (cpsr_z == 0) && (cpsr_n == cpsr_v);
+		break;
+
+	case 0x7:	/* always */
+		cond = 1;
+		break;
+	}
+
+	if (cond == p1) {
+		pr_debug("Conditional abort fixup, PC=%08x, base=%d, cond=%d\n",
+			 (unsigned int) regs->ARM_pc, base_cond, cond);
+		regs->ARM_pc += 2;
+		return 1;
+	}
+	return 0;
+}
+#endif
+
 /*
  * Dispatch a data abort to the relevant handler.
  */
@@ -545,6 +666,16 @@
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
 	struct siginfo info;
 
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+	if (emulate_domain_manager_data_abort(fsr, addr))
+		return;
+#endif
+
+#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER
+	if (krait_tbb_fixup(fsr, regs))
+		return;
+#endif
+
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
@@ -577,6 +708,11 @@
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
 	struct siginfo info;
 
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+	if (emulate_domain_manager_prefetch_abort(ifsr, addr))
+		return;
+#endif
+
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
index 18ca74c..3b016e6 100644
--- a/arch/arm/mm/fsr-2level.c
+++ b/arch/arm/mm/fsr-2level.c
@@ -30,7 +30,7 @@
 	{ do_bad,		SIGBUS,  0,		"unknown 19"			   },
 	{ do_bad,		SIGBUS,  0,		"lock abort"			   }, /* xscale */
 	{ do_bad,		SIGBUS,  0,		"unknown 21"			   },
-	{ do_bad,		SIGBUS,  BUS_OBJERR,	"imprecise external abort"	   }, /* xscale */
+	{ do_imprecise_ext,	SIGBUS,  BUS_OBJERR,	"imprecise external abort"	   }, /* xscale */
 	{ do_bad,		SIGBUS,  0,		"unknown 23"			   },
 	{ do_bad,		SIGBUS,  0,		"dcache parity error"		   }, /* xscale */
 	{ do_bad,		SIGBUS,  0,		"unknown 25"			   },
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813b..59e252b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mman.h>
+#include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
@@ -20,6 +21,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/sort.h>
 
 #include <asm/mach-types.h>
 #include <asm/memblock.h>
@@ -122,7 +124,14 @@
 			else
 				shared += page_count(page) - 1;
 			page++;
+#ifdef CONFIG_SPARSEMEM
+			pfn1++;
+			if (!(pfn1 % PAGES_PER_SECTION))
+				page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
+#else
 		} while (page < end);
+#endif
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -226,6 +235,29 @@
 }
 #endif
 
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+static void __init arm_bootmem_free_apnm(unsigned long max_low,
+	unsigned long max_high)
+{
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	struct memblock_region *reg;
+
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+	max_zone_pfns[0] = max_low;
+#ifdef CONFIG_HIGHMEM
+	max_zone_pfns[ZONE_HIGHMEM] = max_high;
+#endif
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		add_active_range(0, start, end);
+	}
+	free_area_init_nodes(max_zone_pfns);
+}
+
+#else
 static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	unsigned long max_high)
 {
@@ -283,6 +315,7 @@
 
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
+#endif
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
@@ -299,11 +332,12 @@
 #else
 static void __init arm_memory_present(void)
 {
-	struct memblock_region *reg;
-
-	for_each_memblock(memory, reg)
-		memory_present(0, memblock_region_memory_base_pfn(reg),
-			       memblock_region_memory_end_pfn(reg));
+	struct meminfo *mi = &meminfo;
+	int i;
+	for_each_bank(i, mi) {
+		memory_present(0, bank_pfn_start(&mi->bank[i]),
+				bank_pfn_end(&mi->bank[i]));
+	}
 }
 #endif
 
@@ -322,10 +356,37 @@
 	return phys;
 }
 
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+	const struct membank *a = _a, *b = _b;
+	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
+#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
+unsigned long membank0_size;
+EXPORT_SYMBOL(membank0_size);
+unsigned long membank1_start;
+EXPORT_SYMBOL(membank1_start);
+
+void __init find_membank0_hole(void)
+{
+	sort(&meminfo.bank, meminfo.nr_banks,
+		sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+	membank0_size = meminfo.bank[0].size;
+	membank1_start = meminfo.bank[1].start;
+}
+#endif
+
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
 	int i;
 
+#ifndef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
+	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+#endif
+
 	for (i = 0; i < mi->nr_banks; i++)
 		memblock_add(mi->bank[i].start, mi->bank[i].size);
 
@@ -369,6 +430,28 @@
 	memblock_dump_all();
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+int _early_pfn_valid(unsigned long pfn)
+{
+	struct meminfo *mi = &meminfo;
+	unsigned int left = 0, right = mi->nr_banks;
+
+	do {
+		unsigned int mid = (right + left) / 2;
+		struct membank *bank = &mi->bank[mid];
+
+		if (pfn < bank_pfn_start(bank))
+			right = mid;
+		else if (pfn >= bank_pfn_end(bank))
+			left = mid + 1;
+		else
+			return 1;
+	} while (left < right);
+	return 0;
+}
+EXPORT_SYMBOL(_early_pfn_valid);
+#endif
+
 void __init bootmem_init(void)
 {
 	unsigned long min, max_low, max_high;
@@ -390,12 +473,16 @@
 	 */
 	sparse_init();
 
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+	arm_bootmem_free_apnm(max_low, max_high);
+#else
 	/*
 	 * Now free the memory - free_area_init_node needs
 	 * the sparse mem_map arrays initialized by sparse_init()
 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
 	arm_bootmem_free(min, max_low, max_high);
+#endif
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
@@ -466,7 +553,10 @@
 }
 
 /*
- * The mem_map array can get very big.  Free the unused area of the memory map.
+ * The mem_map array can get very big.  Free as much of the unused portion of
+ * the mem_map that we are allowed to. The page migration code moves pages
+ * in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we
+ * can't free mem_map entries that may be dereferenced in this manner.
  */
 static void __init free_unused_memmap(struct meminfo *mi)
 {
@@ -480,7 +570,8 @@
 	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
 
-		bank_start = bank_pfn_start(bank);
+		bank_start = round_down(bank_pfn_start(bank),
+					MAX_ORDER_NR_PAGES);
 
 #ifdef CONFIG_SPARSEMEM
 		/*
@@ -504,12 +595,8 @@
 		if (prev_bank_end && prev_bank_end < bank_start)
 			free_memmap(prev_bank_end, bank_start);
 
-		/*
-		 * Align up here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank end aligned to
-		 * MAX_ORDER_NR_PAGES.
-		 */
-		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+		prev_bank_end = round_up(bank_pfn_end(bank),
+					 MAX_ORDER_NR_PAGES);
 	}
 
 #ifdef CONFIG_SPARSEMEM
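The rounding the updated comment refers to can be pictured with a short sketch; the block size of 1024 pages and the bank range are assumptions for illustration, not values from this patch.

/*
 * Illustration only: with a pageblock of 1024 pfns, a bank covering
 * pfns [1000, 5000) is rounded out to [0, 5120), so only mem_map entries
 * for whole blocks outside that window may be freed.
 */
#define EX_BLOCK	1024UL	/* stand-in for MAX_ORDER_NR_PAGES */

static unsigned long ex_round_down(unsigned long pfn)
{
	return pfn & ~(EX_BLOCK - 1);
}

static unsigned long ex_round_up(unsigned long pfn)
{
	return (pfn + EX_BLOCK - 1) & ~(EX_BLOCK - 1);
}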
@@ -584,6 +671,9 @@
 	extern u32 dtcm_end;
 	extern u32 itcm_end;
 #endif
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	struct zone *zone;
+#endif
 
 	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 
@@ -619,9 +709,24 @@
 			else if (!page_count(page))
 				free_pages++;
 			page++;
+#ifdef CONFIG_SPARSEMEM
+			pfn1++;
+			if (!(pfn1 % PAGES_PER_SECTION))
+				page = pfn_to_page(pfn1);
+		} while (pfn1 < pfn2);
+#else
 		} while (page < end);
+#endif
 	}
 
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	for_each_zone(zone) {
+		if (zone_idx(zone) == ZONE_MOVABLE)
+			total_unmovable_pages = totalram_pages -
+							zone->spanned_pages;
+	}
+#endif
+
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system
@@ -719,6 +824,7 @@
 
 void free_initmem(void)
 {
+	unsigned long reclaimed_initmem;
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
 
@@ -729,23 +835,61 @@
 #endif
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
-	if (!machine_is_integrator() && !machine_is_cintegrator())
-		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+	if (!machine_is_integrator() && !machine_is_cintegrator()) {
+		reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
 					    __phys_to_pfn(__pa(__init_end)),
 					    "init");
+		totalram_pages += reclaimed_initmem;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+		total_unmovable_pages += reclaimed_initmem;
+#endif
+	}
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+	struct pglist_data *pgdata = NODE_DATA(nid);
+	struct zone *zone = pgdata->node_zones + ZONE_MOVABLE;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+
+	return __add_pages(nid, zone, start_pfn, nr_pages);
+}
+
+int arch_physical_active_memory(u64 start, u64 size)
+{
+	return platform_physical_active_pages(start, size);
+}
+
+int arch_physical_remove_memory(u64 start, u64 size)
+{
+	return platform_physical_remove_pages(start, size);
+}
+
+int arch_physical_low_power_memory(u64 start, u64 size)
+{
+	return platform_physical_low_power_pages(start, size);
+}
+#endif
+
 #ifdef CONFIG_BLK_DEV_INITRD
 
 static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
+	unsigned long reclaimed_initrd_mem;
+
 	if (!keep_initrd) {
 		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
-		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
-					    __phys_to_pfn(__pa(end)),
-					    "initrd");
+		reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
+						 __phys_to_pfn(__pa(end)),
+						 "initrd");
+		totalram_pages += reclaimed_initrd_mem;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+		total_unmovable_pages += reclaimed_initrd_mem;
+#endif
 	}
 }
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 4f55f50..8df41e2 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -46,6 +46,14 @@
 }
 EXPORT_SYMBOL(ioremap_page);
 
+int ioremap_pages(unsigned long virt, unsigned long phys, unsigned long size,
+		 const struct mem_type *mtype)
+{
+	return ioremap_page_range(virt, virt + size, phys,
+				  __pgprot(mtype->prot_pte));
+}
+EXPORT_SYMBOL(ioremap_pages);
+
 void __check_kvm_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a61..411fbd9 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -67,5 +67,7 @@
 #define arm_dma_limit ((u32)~0)
 #endif
 
+struct map_desc;
+
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 137858b..e6b733b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,7 @@
 #include <asm/highmem.h>
 #include <asm/system_info.h>
 #include <asm/traps.h>
+#include <asm/mmu_writeable.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -52,6 +53,9 @@
 #define CPOLICY_WRITEBACK	3
 #define CPOLICY_WRITEALLOC	4
 
+#define RX_AREA_START           _text
+#define RX_AREA_END             __start_rodata
+
 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
@@ -257,6 +261,18 @@
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_R] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.domain    = DOMAIN_KERNEL,
+	},
+	[MT_MEMORY_RW] = {
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+		.domain    = DOMAIN_KERNEL,
+	},
+	[MT_MEMORY_RX] = {
+		.prot_sect = PMD_TYPE_SECT,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
@@ -442,6 +458,8 @@
 		 * from SVC mode and no access from userspace.
 		 */
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+		mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
@@ -461,6 +479,9 @@
 			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
 			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 		}
 	}
@@ -513,6 +534,9 @@
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -576,6 +600,7 @@
 	BUG_ON(pmd_bad(*pmd));
 }
 
+#ifdef CONFIG_HIGHMEM
 static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
 	unsigned long addr, unsigned long prot)
 {
@@ -586,6 +611,7 @@
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
 }
+#endif
 
 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
@@ -842,6 +868,14 @@
 {
 	int i, j, highmem = 0;
 
+#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
+	find_membank0_hole();
+#endif
+
+#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
+	if (movable_reserved_size && __pa(vmalloc_min) > movable_reserved_start)
+		vmalloc_min = __va(movable_reserved_start);
+#endif
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
@@ -1105,6 +1139,105 @@
 #endif
 }
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+static struct {
+	pmd_t *pmd_to_flush;
+	pmd_t *pmd;
+	unsigned long addr;
+	pmd_t saved_pmd;
+	bool made_writeable;
+} mem_unprotect;
+
+static DEFINE_SPINLOCK(mem_text_writeable_lock);
+
+void mem_text_writeable_spinlock(unsigned long *flags)
+{
+	spin_lock_irqsave(&mem_text_writeable_lock, *flags);
+}
+
+void mem_text_writeable_spinunlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
+}
+
+/*
+ * mem_text_address_writeable() and mem_text_address_restore()
+ * should be called as a pair. They are used to make the
+ * specified address in the kernel text section temporarily writeable
+ * when it has been marked read-only by STRICT_MEMORY_RWX.
+ * Used by kprobes and other debugging tools to set breakpoints etc.
+ * mem_text_address_writeable() is invoked before writing.
+ * After the write, mem_text_address_restore() must be called
+ * to restore the original state.
+ * This is only effective when used on the kernel text section
+ * marked as MEMORY_RX by map_lowmem()
+ *
+ * They must each be called with mem_text_writeable_lock locked
+ * by the caller, with no unlocking between the calls.
+ * The caller should release mem_text_writeable_lock immediately
+ * after the call to mem_text_address_restore().
+ * Only the write and associated cache operations should be performed
+ * between the calls.
+ */
+
+/* this function must be called with mem_text_writeable_lock held */
+void mem_text_address_writeable(unsigned long addr)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->active_mm;
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud = pud_offset(pgd, addr);
+
+	mem_unprotect.made_writeable = 0;
+
+	if ((addr < (unsigned long)RX_AREA_START) ||
+	    (addr >= (unsigned long)RX_AREA_END))
+		return;
+
+	mem_unprotect.pmd = pmd_offset(pud, addr);
+	mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
+	mem_unprotect.addr = addr & PAGE_MASK;
+
+	if (addr & SECTION_SIZE)
+		mem_unprotect.pmd++;
+
+	mem_unprotect.saved_pmd = *mem_unprotect.pmd;
+	if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
+		return;
+
+	*mem_unprotect.pmd &= ~PMD_SECT_APX;
+
+	flush_pmd_entry(mem_unprotect.pmd_to_flush);
+	flush_tlb_kernel_page(mem_unprotect.addr);
+	mem_unprotect.made_writeable = 1;
+}
+
+/* this function must be called with mem_text_writeable_lock held */
+void mem_text_address_restore(void)
+{
+	if (mem_unprotect.made_writeable) {
+		*mem_unprotect.pmd = mem_unprotect.saved_pmd;
+		flush_pmd_entry(mem_unprotect.pmd_to_flush);
+		flush_tlb_kernel_page(mem_unprotect.addr);
+	}
+}
+#endif
+
+void mem_text_write_kernel_word(unsigned long *addr, unsigned long word)
+{
+	unsigned long flags;
+
+	mem_text_writeable_spinlock(&flags);
+	mem_text_address_writeable((unsigned long)addr);
+	*addr = word;
+	flush_icache_range((unsigned long)addr,
+			   ((unsigned long)addr + sizeof(long)));
+	mem_text_address_restore();
+	mem_text_writeable_spinunlock(&flags);
+}
+EXPORT_SYMBOL(mem_text_write_kernel_word);
+
+extern char __init_data[];
 
 static void __init map_lowmem(void)
 {
@@ -1125,8 +1258,46 @@
 
 		map.pfn = __phys_to_pfn(start);
 		map.virtual = __phys_to_virt(start);
+#ifdef CONFIG_STRICT_MEMORY_RWX
+		if (start <= __pa(_text) && __pa(_text) < end) {
+			map.length = SECTION_SIZE;
+			map.type = MT_MEMORY;
+
+			create_mapping(&map, false);
+
+			map.pfn = __phys_to_pfn(start + SECTION_SIZE);
+			map.virtual = __phys_to_virt(start + SECTION_SIZE);
+			map.length = (unsigned long)RX_AREA_END - map.virtual;
+			map.type = MT_MEMORY_RX;
+
+			create_mapping(&map, false);
+
+			map.pfn = __phys_to_pfn(__pa(__start_rodata));
+			map.virtual = (unsigned long)__start_rodata;
+			map.length = __init_begin - __start_rodata;
+			map.type = MT_MEMORY_R;
+
+			create_mapping(&map, false);
+
+			map.pfn = __phys_to_pfn(__pa(__init_begin));
+			map.virtual = (unsigned long)__init_begin;
+			map.length = __init_data - __init_begin;
+			map.type = MT_MEMORY;
+
+			create_mapping(&map, false);
+
+			map.pfn = __phys_to_pfn(__pa(__init_data));
+			map.virtual = (unsigned long)__init_data;
+			map.length = __phys_to_virt(end) - (unsigned int)__init_data;
+			map.type = MT_MEMORY_RW;
+		} else {
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+		}
+#else
 		map.length = end - start;
 		map.type = MT_MEMORY;
+#endif
 
 		create_mapping(&map, false);
 	}
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 2d8ff3a..5829bb3 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -306,6 +306,8 @@
 	.long	\name\()_flush_kern_dcache_area
 	.long	\name\()_dma_map_area
 	.long	\name\()_dma_unmap_area
+	.long	\name\()_dma_inv_range
+	.long	\name\()_dma_clean_range
 	.long	\name\()_dma_flush_range
 	.size	\name\()_cache_fns, . - \name\()_cache_fns
 .endm
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 5900cd5..501397a 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -107,6 +107,12 @@
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrc	p15, 0, r2, c13, c0, 1		@ read current context ID
+	bic	r2, r2, #0xff			@ extract the PID
+	and	r1, r1, #0xff
+	orr	r1, r1, r2			@ insert the PID into r1
+#endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7..1fda38b 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -49,6 +49,12 @@
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrc     p15, 0, r2, c13, c0, 1          @ read current context ID
+	bic     r2, r2, #0xff                   @ extract the PID
+	and     r1, r1, #0xff
+	orr     r1, r1, r2                      @ insert the PID into r1
+#endif
 	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
 	isb
 1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -140,7 +146,11 @@
 	 *   NOS = PRRR[24+n] = 1	- not outer shareable
 	 */
 .equ	PRRR,	0xff0a81a8
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
+.equ	NMRR,	0x40e080e0
+#else
 .equ	NMRR,	0x40e040e0
+#endif
 
 	/*
 	 * Macro for setting up the TTBRx and TTBCR registers.
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c2e2b66..47dab27 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -56,6 +56,9 @@
 	bic	r1, r1, #0x1			@ ...............m
  THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
 	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU
+	mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D,flush TLB
+	mcr     p15, 0, ip, c7, c5, 6           @ flush BTC
+	dsb
 	isb
 	mov	pc, r0
 ENDPROC(cpu_v7_reset)
@@ -255,6 +258,31 @@
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
+
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+	mov     r0, #0x33
+	mcr     p15, 3, r0, c15, c0, 3          @ set L2CR1
+#endif
+#if defined (CONFIG_ARCH_MSM_SCORPION)
+	mrc     p15, 0, r0, c1, c0, 1           @ read ACTLR
+#ifdef CONFIG_CPU_CACHE_ERR_REPORT
+	orr     r0, r0, #0x37                   @ turn on L1/L2 error reporting
+#else
+	bic     r0, r0, #0x37
+#endif
+#if defined (CONFIG_ARCH_MSM_SCORPIONMP)
+	orr    r0, r0, #0x1 << 24     @ optimal setting for Scorpion MP
+#endif
+#ifndef CONFIG_ARCH_MSM_KRAIT
+	mcr     p15, 0, r0, c1, c0, 1           @ write ACTLR
+#endif
+#endif
+#if defined (CONFIG_ARCH_MSM_SCORPIONMP)
+	mrc     p15, 3, r0, c15, c0, 2  @ optimal setting for Scorpion MP
+	orr         r0, r0, #0x1 << 21
+	mcr     p15, 3, r0, c15, c0, 2
+#endif
+
 #ifndef CONFIG_ARM_THUMBEE
 	mrc	p15, 0, r0, c0, c1, 0		@ read ID_PFR0 for ThumbEE
 	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 845f461..0e88578 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,11 +38,19 @@
 	dsb
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
+#ifdef CONFIG_ARCH_MSM8X60
+	mov	r0, r0, lsl #PAGE_SHIFT
+#else
 	asid	r3, r3				@ mask ASID
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
+#endif
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
+#ifdef CONFIG_ARCH_MSM8X60
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 3)	@ TLB invalidate U MVA (shareable)
+#else
 	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+#endif
 	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 
 	add	r0, r0, #PAGE_SZ
@@ -67,7 +75,11 @@
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
+#ifdef CONFIG_ARCH_MSM8X60
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 3)	@ TLB invalidate U MVA (shareable)
+#else
 	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+#endif
 	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
diff --git a/arch/arm/mm/vcm.c b/arch/arm/mm/vcm.c
new file mode 100644
index 0000000..f2d9457
--- /dev/null
+++ b/arch/arm/mm/vcm.c
@@ -0,0 +1,1830 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vcm_mm.h>
+#include <linux/vcm.h>
+#include <linux/vcm_alloc.h>
+#include <linux/vcm_types.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+
+#include <asm/page.h>
+#include <asm/sizes.h>
+
+#include <linux/iommu.h>
+
+/* alloc_vm_area */
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+#define ONE_TO_ONE_CHK 1
+
+#define vcm_err(a, ...)							\
+	pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
+
+static unsigned int smmu_map_sizes[4] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};
+
+static phys_addr_t *bootmem_cont;
+static int cont_sz;
+static struct vcm *cont_vcm_id;
+static struct phys_chunk *cont_phys_chunk;
+
+DEFINE_SPINLOCK(vcmlock);
+
+/* Leaving this in for now to keep compatibility of the API. */
+/* This will disappear. */
+phys_addr_t vcm_get_dev_addr(struct res *res)
+{
+	if (!res) {
+		vcm_err("NULL RES");
+		return -EINVAL;
+	}
+	return res->dev_addr;
+}
+
+static int vcm_no_res(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	return list_empty(&vcm->res_head);
+fail:
+	return -EINVAL;
+}
+
+static int vcm_no_assoc(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	return list_empty(&vcm->assoc_head);
+fail:
+	return -EINVAL;
+}
+
+static int vcm_all_activated(struct vcm *vcm)
+{
+	struct avcm *avcm;
+
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	list_for_each_entry(avcm, &vcm->assoc_head, assoc_elm)
+		if (!avcm->is_active)
+			return 0;
+
+	return 1;
+fail:
+	return -EINVAL;
+}
+
+static void vcm_destroy_common(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		return;
+	}
+
+	memset(vcm, 0, sizeof(*vcm));
+	kfree(vcm);
+}
+
+static struct vcm *vcm_create_common(void)
+{
+	struct vcm *vcm = 0;
+
+	vcm = kzalloc(sizeof(*vcm), GFP_KERNEL);
+	if (!vcm) {
+		vcm_err("kzalloc(%i, GFP_KERNEL) ret 0\n",
+			sizeof(*vcm));
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&vcm->res_head);
+	INIT_LIST_HEAD(&vcm->assoc_head);
+
+	return vcm;
+
+fail:
+	return NULL;
+}
+
+
+static int vcm_create_pool(struct vcm *vcm, unsigned long start_addr,
+			   size_t len)
+{
+	int ret = 0;
+
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	vcm->start_addr = start_addr;
+	vcm->len = len;
+
+	vcm->pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!vcm->pool) {
+		vcm_err("gen_pool_create(%x, -1) ret 0\n", PAGE_SHIFT);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = gen_pool_add(vcm->pool, start_addr, len, -1);
+	if (ret) {
+		vcm_err("gen_pool_add(%p, %p, %i, -1) ret %i\n", vcm->pool,
+			(void *) start_addr, len, ret);
+		goto fail;
+	}
+
+	vcm->domain = iommu_domain_alloc();
+	if (!vcm->domain) {
+		vcm_err("Could not allocate domain\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+fail:
+	if (ret && vcm->pool)
+		gen_pool_destroy(vcm->pool);
+
+	return ret;
+}
+
+
+static struct vcm *vcm_create_flagged(int flag, unsigned long start_addr,
+				      size_t len)
+{
+	int ret = 0;
+	struct vcm *vcm = 0;
+
+	vcm = vcm_create_common();
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	/* special one-to-one mapping case */
+	if ((flag & ONE_TO_ONE_CHK) &&
+	    bootmem_cont &&
+	    start_addr == (size_t) bootmem_cont &&
+	    len == cont_sz) {
+		vcm->type = VCM_ONE_TO_ONE;
+	} else {
+		ret = vcm_create_pool(vcm, start_addr, len);
+		vcm->type = VCM_DEVICE;
+	}
+
+	if (ret) {
+		vcm_err("vcm_create_pool(%p, %p, %i) ret %i\n", vcm,
+			(void *) start_addr, len, ret);
+		goto fail2;
+	}
+
+	return vcm;
+
+fail2:
+	vcm_destroy_common(vcm);
+fail:
+	return NULL;
+}
+
+struct vcm *vcm_create(unsigned long start_addr, size_t len)
+{
+	unsigned long flags;
+	struct vcm *vcm;
+
+	spin_lock_irqsave(&vcmlock, flags);
+	vcm = vcm_create_flagged(ONE_TO_ONE_CHK, start_addr, len);
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return vcm;
+}
+
+
+static int ext_vcm_id_valid(size_t ext_vcm_id)
+{
+	return ((ext_vcm_id == VCM_PREBUILT_KERNEL) ||
+		(ext_vcm_id == VCM_PREBUILT_USER));
+}
+
+
+struct vcm *vcm_create_from_prebuilt(size_t ext_vcm_id)
+{
+	unsigned long flags;
+	struct vcm *vcm = 0;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!ext_vcm_id_valid(ext_vcm_id)) {
+		vcm_err("ext_vcm_id_valid(%i) ret 0\n", ext_vcm_id);
+		goto fail;
+	}
+
+	vcm = vcm_create_common();
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	if (ext_vcm_id == VCM_PREBUILT_KERNEL)
+		vcm->type = VCM_EXT_KERNEL;
+	else if (ext_vcm_id == VCM_PREBUILT_USER)
+		vcm->type = VCM_EXT_USER;
+	else {
+		vcm_err("UNREACHABLE ext_vcm_id is illegal\n");
+		goto fail_free;
+	}
+
+	/* TODO: set kernel and userspace start_addr and len, if this
+	 * makes sense */
+
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return vcm;
+
+fail_free:
+	vcm_destroy_common(vcm);
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return NULL;
+}
+
+
+struct vcm *vcm_clone(struct vcm *vcm)
+{
+	return 0;
+}
+
+
+/* No lock needed, vcm->start_addr is never updated after creation */
+size_t vcm_get_start_addr(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		return 1;
+	}
+
+	return vcm->start_addr;
+}
+
+
+/* No lock needed, vcm->len is never updated after creation */
+size_t vcm_get_len(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		return 0;
+	}
+
+	return vcm->len;
+}
+
+
+static int vcm_free_common_rule(struct vcm *vcm)
+{
+	int ret;
+
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	ret = vcm_no_res(vcm);
+	if (!ret) {
+		vcm_err("vcm_no_res(%p) ret 0\n", vcm);
+		goto fail_busy;
+	}
+
+	if (ret == -EINVAL) {
+		vcm_err("vcm_no_res(%p) ret -EINVAL\n", vcm);
+		goto fail;
+	}
+
+	ret = vcm_no_assoc(vcm);
+	if (!ret) {
+		vcm_err("vcm_no_assoc(%p) ret 0\n", vcm);
+		goto fail_busy;
+	}
+
+	if (ret == -EINVAL) {
+		vcm_err("vcm_no_assoc(%p) ret -EINVAL\n", vcm);
+		goto fail;
+	}
+
+	return 0;
+
+fail_busy:
+	return -EBUSY;
+fail:
+	return -EINVAL;
+}
+
+
+static int vcm_free_pool_rule(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	/* A vcm always has a valid pool; if it is missing, what we were
+	 * handed is probably not a valid vcm, so don't free it.
+	 */
+	if (!vcm->pool) {
+		vcm_err("NULL vcm->pool\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+
+static void vcm_free_common(struct vcm *vcm)
+{
+	memset(vcm, 0, sizeof(*vcm));
+
+	kfree(vcm);
+}
+
+
+static int vcm_free_pool(struct vcm *vcm)
+{
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	gen_pool_destroy(vcm->pool);
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+
+static int __vcm_free(struct vcm *vcm)
+{
+	int ret;
+
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	ret = vcm_free_common_rule(vcm);
+	if (ret != 0) {
+		vcm_err("vcm_free_common_rule(%p) ret %i\n", vcm, ret);
+		goto fail;
+	}
+
+	if (vcm->type == VCM_DEVICE) {
+		ret = vcm_free_pool_rule(vcm);
+		if (ret != 0) {
+			vcm_err("vcm_free_pool_rule(%p) ret %i\n",
+				(void *) vcm, ret);
+			goto fail;
+		}
+		if (vcm->domain)
+			iommu_domain_free(vcm->domain);
+
+		vcm->domain = NULL;
+		ret = vcm_free_pool(vcm);
+		if (ret != 0) {
+			vcm_err("vcm_free_pool(%p) ret %i", (void *) vcm, ret);
+			goto fail;
+		}
+	}
+
+	vcm_free_common(vcm);
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+int vcm_free(struct vcm *vcm)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vcmlock, flags);
+	ret = __vcm_free(vcm);
+	spin_unlock_irqrestore(&vcmlock, flags);
+
+	return ret;
+}
+
+
+static struct res *__vcm_reserve(struct vcm *vcm, size_t len, u32 attr)
+{
+	struct res *res = NULL;
+	int align_attr = 0, i = 0;
+
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	if (len == 0) {
+		vcm_err("len is 0\n");
+		goto fail;
+	}
+
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res) {
+		vcm_err("kzalloc(%i, GFP_KERNEL) ret 0", sizeof(*res));
+		goto fail;
+	}
+
+	align_attr = (attr >> VCM_ALIGN_SHIFT) & VCM_ALIGN_MASK;
+
+	if (align_attr >= 32) {
+		vcm_err("Invalid alignment attribute: %d\n", align_attr);
+		goto fail2;
+	}
+
+	INIT_LIST_HEAD(&res->res_elm);
+	res->vcm = vcm;
+	res->len = len;
+	res->attr = attr;
+	res->alignment_req = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1];
+
+	if (align_attr == 0) {
+		for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++)
+			if (len / smmu_map_sizes[i]) {
+				res->alignment_req = smmu_map_sizes[i];
+				break;
+			}
+	} else
+		res->alignment_req = 1 << align_attr;
+
+	res->aligned_len = res->alignment_req + len;
+
+	switch (vcm->type) {
+	case VCM_DEVICE:
+		/* should always be not zero */
+		if (!vcm->pool) {
+			vcm_err("NULL vcm->pool\n");
+			goto fail2;
+		}
+
+		res->ptr = gen_pool_alloc(vcm->pool, res->aligned_len);
+		if (!res->ptr) {
+			vcm_err("gen_pool_alloc(%p, %i) ret 0\n",
+				vcm->pool, res->aligned_len);
+			goto fail2;
+		}
+
+		/* Calculate alignment... this will all change anyway */
+		res->dev_addr = res->ptr +
+			(res->alignment_req -
+			 (res->ptr & (res->alignment_req - 1)));
+
+		break;
+	case VCM_EXT_KERNEL:
+		res->vm_area = alloc_vm_area(res->aligned_len);
+		res->mapped = 0; /* be explicit */
+		if (!res->vm_area) {
+			vcm_err("NULL res->vm_area\n");
+			goto fail2;
+		}
+
+		res->dev_addr = (size_t) res->vm_area->addr +
+			(res->alignment_req -
+			 ((size_t) res->vm_area->addr &
+			  (res->alignment_req - 1)));
+
+		break;
+	case VCM_ONE_TO_ONE:
+		break;
+	default:
+		vcm_err("%i is an invalid vcm->type\n", vcm->type);
+		goto fail2;
+	}
+
+	list_add_tail(&res->res_elm, &vcm->res_head);
+
+	return res;
+
+fail2:
+	kfree(res);
+fail:
+	return 0;
+}
+
+
+struct res *vcm_reserve(struct vcm *vcm, size_t len, u32 attr)
+{
+	unsigned long flags;
+	struct res *res;
+
+	spin_lock_irqsave(&vcmlock, flags);
+	res = __vcm_reserve(vcm, len, attr);
+	spin_unlock_irqrestore(&vcmlock, flags);
+
+	return res;
+}
+
+
+struct res *vcm_reserve_at(enum memtarget_t memtarget, struct vcm *vcm,
+			   size_t len, u32 attr)
+{
+	return 0;
+}
+
+
+static int __vcm_unreserve(struct res *res)
+{
+	struct vcm *vcm;
+
+	if (!res) {
+		vcm_err("NULL res\n");
+		goto fail;
+	}
+
+	if (!res->vcm) {
+		vcm_err("NULL res->vcm\n");
+		goto fail;
+	}
+
+	vcm = res->vcm;
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	switch (vcm->type) {
+	case VCM_DEVICE:
+		if (!res->vcm->pool) {
+			vcm_err("NULL (res->vcm))->pool\n");
+			goto fail;
+		}
+
+		/* res->ptr could be zero, this isn't an error */
+		gen_pool_free(res->vcm->pool, res->ptr,
+			      res->aligned_len);
+		break;
+	case VCM_EXT_KERNEL:
+		if (res->mapped) {
+			vcm_err("res->mapped is true\n");
+			goto fail;
+		}
+
+		/* This may take a little explaining.
+		 * In the kernel vunmap will free res->vm_area
+		 * so if we've called it then we shouldn't call
+		 * free_vm_area(). If we've called it we set
+		 * res->vm_area to 0.
+		 */
+		if (res->vm_area) {
+			free_vm_area(res->vm_area);
+			res->vm_area = 0;
+		}
+
+		break;
+	case VCM_ONE_TO_ONE:
+		break;
+	default:
+		vcm_err("%i is an invalid vcm->type\n", vcm->type);
+		goto fail;
+	}
+
+	list_del(&res->res_elm);
+
+	/* be extra careful by clearing the memory before freeing it */
+	memset(res, 0, sizeof(*res));
+
+	kfree(res);
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+
+int vcm_unreserve(struct res *res)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vcmlock, flags);
+	ret = __vcm_unreserve(res);
+	spin_unlock_irqrestore(&vcmlock, flags);
+
+	return ret;
+}
+
+
+/* No lock needed, res->len is never updated after creation */
+size_t vcm_get_res_len(struct res *res)
+{
+	if (!res) {
+		vcm_err("res is 0\n");
+		return 0;
+	}
+
+	return res->len;
+}
+
+
+int vcm_set_res_attr(struct res *res, u32 attr)
+{
+	return 0;
+}
+
+
+u32 vcm_get_res_attr(struct res *res)
+{
+	return 0;
+}
+
+
+size_t vcm_get_num_res(struct vcm *vcm)
+{
+	return 0;
+}
+
+
+struct res *vcm_get_next_res(struct vcm *vcm, struct res *res)
+{
+	return 0;
+}
+
+
+size_t vcm_res_copy(struct res *to, size_t to_off, struct res *from, size_t
+		    from_off, size_t len)
+{
+	return 0;
+}
+
+
+size_t vcm_get_min_page_size(void)
+{
+	return PAGE_SIZE;
+}
+
+
+static int vcm_to_smmu_attr(u32 attr)
+{
+	int smmu_attr = 0;
+
+	switch (attr & VCM_CACHE_POLICY) {
+	case VCM_NOTCACHED:
+		smmu_attr = VCM_DEV_ATTR_NONCACHED;
+		break;
+	case VCM_WB_WA:
+		smmu_attr = VCM_DEV_ATTR_CACHED_WB_WA;
+		smmu_attr |= VCM_DEV_ATTR_SH;
+		break;
+	case VCM_WB_NWA:
+		smmu_attr = VCM_DEV_ATTR_CACHED_WB_NWA;
+		smmu_attr |= VCM_DEV_ATTR_SH;
+		break;
+	case VCM_WT:
+		smmu_attr = VCM_DEV_ATTR_CACHED_WT;
+		smmu_attr |= VCM_DEV_ATTR_SH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return smmu_attr;
+}
+
+
+static int vcm_process_chunk(struct iommu_domain *domain, phys_addr_t pa,
+			     unsigned long va, size_t len, u32 attr, int map)
+{
+	int ret, i, map_order;
+	unsigned long map_len = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1];
+
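+	/*
+	 * Pick the largest map size that both the VA alignment and the
+	 * remaining length allow; fall back to the smallest size otherwise.
+	 */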
+	for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) {
+		if (IS_ALIGNED(va, smmu_map_sizes[i]) && len >=
+							smmu_map_sizes[i]) {
+			map_len = smmu_map_sizes[i];
+			break;
+		}
+	}
+
+#ifdef VCM_PERF_DEBUG
+	if (va & (len - 1))
+		pr_warning("Warning! Suboptimal VCM mapping alignment "
+			   "va = %p, len = %p. Expect TLB performance "
+			   "degradation.\n", (void *) va, (void *) len);
+#endif
+
+	map_order = get_order(map_len);
+
+	while (len) {
+		if (va & (SZ_4K - 1)) {
+			vcm_err("Tried to map w/ align < 4k! va = %08lx\n", va);
+			goto fail;
+		}
+
+		if (map_len > len) {
+			vcm_err("map_len = %lu, len = %d, trying to overmap\n",
+				 map_len, len);
+			goto fail;
+		}
+
+		if (map)
+			ret = iommu_map(domain, va, pa, map_len, attr);
+		else
+			ret = iommu_unmap(domain, va, map_len);
+
+		if (ret) {
+			vcm_err("iommu_map/unmap(%p, %p, %p, 0x%x, 0x%x) ret %i"
+				"map = %d", (void *) domain, (void *) pa,
+				(void *) va, (int) map_len, attr, ret, map);
+			goto fail;
+		}
+
+		va += map_len;
+		pa += map_len;
+		len -= map_len;
+	}
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/* TBD if you vcm_back again what happens? */
+int vcm_back(struct res *res, struct physmem *physmem)
+{
+	unsigned long flags;
+	struct vcm *vcm;
+	struct phys_chunk *chunk;
+	size_t va = 0;
+	int ret;
+	int attr;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!res) {
+		vcm_err("NULL res\n");
+		goto fail;
+	}
+
+	vcm = res->vcm;
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	switch (vcm->type) {
+	case VCM_DEVICE:
+	case VCM_EXT_KERNEL: /* hack part 1 */
+		attr = vcm_to_smmu_attr(res->attr);
+		if (attr < 0) {
+			vcm_err("Bad SMMU attr\n");
+			goto fail;
+		}
+		break;
+	default:
+		attr = 0;
+		break;
+	}
+
+	if (!physmem) {
+		vcm_err("NULL physmem\n");
+		goto fail;
+	}
+
+	if (res->len == 0) {
+		vcm_err("res->len is 0\n");
+		goto fail;
+	}
+
+	if (physmem->len == 0) {
+		vcm_err("physmem->len is 0\n");
+		goto fail;
+	}
+
+	if (res->len != physmem->len) {
+		vcm_err("res->len (%i) != physmem->len (%i)\n",
+			res->len, physmem->len);
+		goto fail;
+	}
+
+	if (physmem->is_cont) {
+		if (physmem->res == 0) {
+			vcm_err("cont physmem->res is 0");
+			goto fail;
+		}
+	} else {
+		/* fail if no physmem */
+		if (list_empty(&physmem->alloc_head.allocated)) {
+			vcm_err("no allocated phys memory");
+			goto fail;
+		}
+	}
+
+	ret = vcm_no_assoc(res->vcm);
+	if (ret == 1) {
+		vcm_err("can't back un associated VCM\n");
+		goto fail;
+	}
+
+	if (ret == -1) {
+		vcm_err("vcm_no_assoc() ret -1\n");
+		goto fail;
+	}
+
+	ret = vcm_all_activated(res->vcm);
+	if (ret == 0) {
+		vcm_err("can't back, not all associations are activated\n");
+		goto fail_eagain;
+	}
+
+	if (ret == -1) {
+		vcm_err("vcm_all_activated() ret -1\n");
+		goto fail;
+	}
+
+	va = res->dev_addr;
+
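+	/* Walk the physical chunks and back the reservation chunk by chunk,
+	 * advancing the target address as each chunk is handled.
+	 */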
+	list_for_each_entry(chunk, &physmem->alloc_head.allocated,
+			    allocated) {
+		struct vcm *vcm = res->vcm;
+		size_t chunk_size = chunk->size;
+
+		if (chunk_size == 0) {
+			vcm_err("Bad chunk size: %zu\n", chunk_size);
+			goto fail;
+		}
+
+		switch (vcm->type) {
+		case VCM_DEVICE:
+		{
+			/* map all */
+			ret = vcm_process_chunk(vcm->domain, chunk->pa,
+						va, chunk_size, attr, 1);
+			if (ret != 0) {
+				vcm_err("vcm_process_chunk(%p, %p, %p,"
+					" 0x%x, 0x%x)"
+					" ret %i",
+					vcm->domain,
+					(void *) chunk->pa,
+					(void *) va,
+					(int) chunk_size, attr, ret);
+				goto fail;
+			}
+			break;
+		}
+
+		case VCM_EXT_KERNEL:
+		{
+			unsigned int pages_in_chunk = chunk_size / PAGE_SIZE;
+			unsigned long loc_va = va;
+			unsigned long loc_pa = chunk->pa;
+
+			const struct mem_type *mtype;
+
+			/* TODO: get this based on MEMTYPE */
+			mtype = get_mem_type(MT_DEVICE);
+			if (!mtype) {
+				vcm_err("mtype is 0\n");
+				goto fail;
+			}
+
+			/* TODO: Map with the same chunk size */
+			while (pages_in_chunk--) {
+				ret = ioremap_page(loc_va,
+						   loc_pa,
+						   mtype);
+				if (ret != 0) {
+					vcm_err("ioremap_page(%p, %p, %p) ret"
+						" %i", (void *) loc_va,
+						(void *) loc_pa,
+						(void *) mtype, ret);
+					goto fail;
+					/* TODO handle weird
+					   inter-map case */
+				}
+
+				/* hack part 2 */
+				/* we're changing the PT entry behind
+				 * linux's back
+				 */
+				ret = cpu_set_attr(loc_va, PAGE_SIZE, attr);
+				if (ret != 0) {
+					vcm_err("cpu_set_attr(%p, %lu, %x)"
+						"ret %i\n",
+						(void *) loc_va, PAGE_SIZE,
+						attr, ret);
+					goto fail;
+					/* TODO handle weird
+					   inter-map case */
+				}
+
+				res->mapped = 1;
+
+				loc_va += PAGE_SIZE;
+				loc_pa += PAGE_SIZE;
+			}
+
+			flush_cache_vmap(va, loc_va);
+			break;
+		}
+		case VCM_ONE_TO_ONE:
+			va = chunk->pa;
+			break;
+		default:
+			/* this should never happen */
+			goto fail;
+		}
+
+		va += chunk_size;
+		/* also add res to the allocated chunk list of refs */
+	}
+
+	/* note the reservation */
+	res->physmem = physmem;
+
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+fail_eagain:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EAGAIN;
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EINVAL;
+}
+
+
+int vcm_unback(struct res *res)
+{
+	unsigned long flags;
+	struct vcm *vcm;
+	struct physmem *physmem;
+	int ret;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!res)
+		goto fail;
+
+	vcm = res->vcm;
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	if (!res->physmem) {
+		vcm_err("can't unback a non-backed reservation\n");
+		goto fail;
+	}
+
+	physmem = res->physmem;
+	if (!physmem) {
+		vcm_err("physmem is NULL\n");
+		goto fail;
+	}
+
+	if (list_empty(&physmem->alloc_head.allocated)) {
+		vcm_err("physmem allocation is empty\n");
+		goto fail;
+	}
+
+	ret = vcm_no_assoc(res->vcm);
+	if (ret == 1) {
+		vcm_err("can't unback a unassociated reservation\n");
+		goto fail;
+	}
+
+	if (ret == -1) {
+		vcm_err("vcm_no_assoc(%p) ret -1\n", (void *) res->vcm);
+		goto fail;
+	}
+
+	ret = vcm_all_activated(res->vcm);
+	if (ret == 0) {
+		vcm_err("can't unback, not all associations are active\n");
+		goto fail_eagain;
+	}
+
+	if (ret == -1) {
+		vcm_err("vcm_all_activated(%p) ret -1\n", (void *) res->vcm);
+		goto fail;
+	}
+
+
+	switch (vcm->type) {
+	case VCM_EXT_KERNEL:
+		if (!res->mapped) {
+			vcm_err("can't unback an unmapped VCM_EXT_KERNEL"
+				" VCM\n");
+			goto fail;
+		}
+
+		/* vunmap frees the vm_area */
+		vunmap(res->vm_area->addr);
+		res->vm_area = 0;
+
+		res->mapped = 0;
+		break;
+
+	case VCM_DEVICE:
+	{
+		struct phys_chunk *chunk;
+		size_t va = res->dev_addr;
+
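+		/* Unmap each chunk from the device's IOMMU domain, chunk by
+		 * chunk, starting at the reservation's device address.
+		 */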
+		list_for_each_entry(chunk, &physmem->alloc_head.allocated,
+				    allocated) {
+			struct vcm *vcm = res->vcm;
+			size_t chunk_size = chunk->size;
+
+			ret = vcm_process_chunk(vcm->domain, 0, va,
+						chunk_size, 0, 0);
+			if (ret != 0) {
+				vcm_err("vcm_unback_chunk(%p, %p, 0x%x)"
+					" ret %i",
+					(void *) vcm->domain,
+					(void *) va,
+					(int) chunk_size, ret);
+				goto fail;
+				/* TODO handle weird inter-unmap state*/
+			}
+
+			va += chunk_size;
+			/* may do a light unback, depending on the requested
+			 * functionality
+			 */
+		}
+		break;
+	}
+
+	case VCM_ONE_TO_ONE:
+		break;
+	default:
+		/* this should never happen */
+		goto fail;
+	}
+
+	/* clear the reservation */
+	res->physmem = 0;
+
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+fail_eagain:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EAGAIN;
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EINVAL;
+}
+
+
+enum memtarget_t vcm_get_memtype_of_res(struct res *res)
+{
+	return VCM_INVALID;
+}
+
+static int vcm_free_max_munch_cont(struct phys_chunk *head)
+{
+	struct phys_chunk *chunk, *tmp;
+
+	if (!head)
+		return -EINVAL;
+
+	list_for_each_entry_safe(chunk, tmp, &head->allocated,
+				 allocated) {
+		list_del_init(&chunk->allocated);
+	}
+
+	return 0;
+}
+
+static int vcm_alloc_max_munch_cont(size_t start_addr, size_t len,
+				    struct phys_chunk *head)
+{
+	/* this function should always succeed, since it
+	   parallels a VCM */
+
+	int i, j;
+
+	if (!head) {
+		vcm_err("head is NULL in continuous map.\n");
+		goto fail;
+	}
+
+	if (start_addr < (size_t) bootmem_cont) {
+		vcm_err("phys start addr (%p) < base (%p)\n",
+			(void *) start_addr, (void *) bootmem_cont);
+		goto fail;
+	}
+
+	if ((start_addr + len) >= ((size_t) bootmem_cont + cont_sz)) {
+		vcm_err("requested region (%p + %i) > "
+			" available region (%p + %i)",
+			(void *) start_addr, (int) len,
+			(void *) bootmem_cont, cont_sz);
+		goto fail;
+	}
+
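+	/* i indexes 4K chunks relative to the start of the contiguous
+	 * region (cont_phys_chunk has one entry per 4K page).
+	 */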
+	i = (start_addr - (size_t) bootmem_cont)/SZ_4K;
+
+	for (j = 0; j < ARRAY_SIZE(smmu_map_sizes); ++j) {
+		while (len/smmu_map_sizes[j]) {
+			if (!list_empty(&cont_phys_chunk[i].allocated)) {
+				vcm_err("chunk %i ( addr %p) already mapped\n",
+					i, (void *) (start_addr +
+						     (i*smmu_map_sizes[j])));
+				goto fail_free;
+			}
+			list_add_tail(&cont_phys_chunk[i].allocated,
+				      &head->allocated);
+			cont_phys_chunk[i].size = smmu_map_sizes[j];
+
+			len -= smmu_map_sizes[j];
+			i += smmu_map_sizes[j]/SZ_4K;
+		}
+	}
+
+	if (len % SZ_4K) {
+		if (!list_empty(&cont_phys_chunk[i].allocated)) {
+			vcm_err("chunk %i (addr %p) already mapped\n",
+				i, (void *) (start_addr + (i*SZ_4K)));
+			goto fail_free;
+		}
+		len -= SZ_4K;
+		list_add_tail(&cont_phys_chunk[i].allocated,
+			      &head->allocated);
+
+		i++;
+	}
+
+	return i;
+
+fail_free:
+	{
+		struct phys_chunk *chunk, *tmp;
+		/* just remove from list, if we're double alloc'ing
+		   we don't want to stamp on the other guy */
+		list_for_each_entry_safe(chunk, tmp, &head->allocated,
+					 allocated) {
+			list_del(&chunk->allocated);
+		}
+	}
+fail:
+	return 0;
+}
+
+struct physmem *vcm_phys_alloc(enum memtype_t memtype, size_t len, u32 attr)
+{
+	unsigned long flags;
+	int ret;
+	struct physmem *physmem = NULL;
+	int blocks_allocated;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	physmem = kzalloc(sizeof(*physmem), GFP_KERNEL);
+	if (!physmem) {
+		vcm_err("physmem is NULL\n");
+		goto fail;
+	}
+
+	physmem->memtype = memtype;
+	physmem->len = len;
+	physmem->attr = attr;
+
+	INIT_LIST_HEAD(&physmem->alloc_head.allocated);
+
+	if (attr & VCM_PHYS_CONT) {
+		if (!cont_vcm_id) {
+			vcm_err("cont_vcm_id is NULL\n");
+			goto fail2;
+		}
+
+		physmem->is_cont = 1;
+
+		/* TODO: get attributes */
+		physmem->res = __vcm_reserve(cont_vcm_id, len, 0);
+		if (physmem->res == 0) {
+			vcm_err("contiguous space allocation failed\n");
+			goto fail2;
+		}
+
+		/* if we're here we know we have memory, create
+		   the shadow physmem links*/
+		blocks_allocated =
+			vcm_alloc_max_munch_cont(
+				physmem->res->dev_addr,
+				len,
+				&physmem->alloc_head);
+
+		if (blocks_allocated == 0) {
+			vcm_err("shadow physmem allocation failed\n");
+			goto fail3;
+		}
+	} else {
+		blocks_allocated = vcm_alloc_max_munch(len, memtype,
+						       &physmem->alloc_head);
+		if (blocks_allocated == 0) {
+			vcm_err("physical allocation failed:"
+				" vcm_alloc_max_munch(%i, %p) ret 0\n",
+				len, &physmem->alloc_head);
+			goto fail2;
+		}
+	}
+
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return physmem;
+
+fail3:
+	ret = __vcm_unreserve(physmem->res);
+	if (ret != 0) {
+		vcm_err("vcm_unreserve(%p) ret %i during cleanup",
+			(void *) physmem->res, ret);
+		spin_unlock_irqrestore(&vcmlock, flags);
+		return 0;
+	}
+fail2:
+	kfree(physmem);
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+}
+
+
+int vcm_phys_free(struct physmem *physmem)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!physmem) {
+		vcm_err("physmem is NULL\n");
+		goto fail;
+	}
+
+	if (physmem->is_cont) {
+		if (physmem->res == 0) {
+			vcm_err("contiguous reservation is NULL\n");
+			goto fail;
+		}
+
+		ret = vcm_free_max_munch_cont(&physmem->alloc_head);
+		if (ret != 0) {
+			vcm_err("failed to free physical blocks:"
+				" vcm_free_max_munch_cont(%p) ret %i\n",
+				(void *) &physmem->alloc_head, ret);
+			goto fail;
+		}
+
+		ret = __vcm_unreserve(physmem->res);
+		if (ret != 0) {
+			vcm_err("failed to free virtual blocks:"
+				" vcm_unreserve(%p) ret %i\n",
+				(void *) physmem->res, ret);
+			goto fail;
+		}
+
+	} else {
+
+		ret = vcm_alloc_free_blocks(physmem->memtype,
+					    &physmem->alloc_head);
+		if (ret != 0) {
+			vcm_err("failed to free physical blocks:"
+				" vcm_alloc_free_blocks(%p) ret %i\n",
+				(void *) &physmem->alloc_head, ret);
+			goto fail;
+		}
+	}
+
+	memset(physmem, 0, sizeof(*physmem));
+
+	kfree(physmem);
+
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EINVAL;
+}
+
+
+struct avcm *vcm_assoc(struct vcm *vcm, struct device *dev, u32 attr)
+{
+	unsigned long flags;
+	struct avcm *avcm = NULL;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!vcm) {
+		vcm_err("vcm is NULL\n");
+		goto fail;
+	}
+
+	if (!dev) {
+		vcm_err("dev_id is NULL\n");
+		goto fail;
+	}
+
+	if (vcm->type == VCM_EXT_KERNEL && !list_empty(&vcm->assoc_head)) {
+		vcm_err("only one device may be assocoated with a"
+			" VCM_EXT_KERNEL\n");
+		goto fail;
+	}
+
+	avcm = kzalloc(sizeof(*avcm), GFP_KERNEL);
+	if (!avcm) {
+		vcm_err("kzalloc(%i, GFP_KERNEL) ret NULL\n", sizeof(*avcm));
+		goto fail;
+	}
+
+	avcm->dev = dev;
+
+	avcm->vcm = vcm;
+	avcm->attr = attr;
+	avcm->is_active = 0;
+
+	INIT_LIST_HEAD(&avcm->assoc_elm);
+	list_add(&avcm->assoc_elm, &vcm->assoc_head);
+
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return avcm;
+
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+}
+
+
+int vcm_deassoc(struct avcm *avcm)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!avcm) {
+		vcm_err("avcm is NULL\n");
+		goto fail;
+	}
+
+	if (list_empty(&avcm->assoc_elm)) {
+		vcm_err("nothing to deassociate\n");
+		goto fail;
+	}
+
+	if (avcm->is_active) {
+		vcm_err("association still activated\n");
+		goto fail_busy;
+	}
+
+	list_del(&avcm->assoc_elm);
+
+	memset(avcm, 0, sizeof(*avcm));
+
+	kfree(avcm);
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+fail_busy:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EBUSY;
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EINVAL;
+}
+
+
+int vcm_set_assoc_attr(struct avcm *avcm, u32 attr)
+{
+	return 0;
+}
+
+
+u32 vcm_get_assoc_attr(struct avcm *avcm)
+{
+	return 0;
+}
+
+
+int vcm_activate(struct avcm *avcm)
+{
+	unsigned long flags;
+	struct vcm *vcm;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!avcm) {
+		vcm_err("avcm is NULL\n");
+		goto fail;
+	}
+
+	vcm = avcm->vcm;
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	if (!avcm->dev) {
+		vcm_err("cannot activate without a device\n");
+		goto fail_nodev;
+	}
+
+	if (avcm->is_active) {
+		vcm_err("double activate\n");
+		goto fail_busy;
+	}
+
+	if (vcm->type == VCM_DEVICE) {
+#ifdef CONFIG_SMMU
+		int ret;
+		ret = iommu_attach_device(vcm->domain, avcm->dev);
+		if (ret != 0) {
+			dev_err(avcm->dev, "failed to attach to domain\n");
+			goto fail_dev;
+		}
+#else
+		vcm_err("No SMMU support - cannot activate/deactivate\n");
+		goto fail_nodev;
+#endif
+	}
+
+	avcm->is_active = 1;
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+
+#ifdef CONFIG_SMMU
+fail_dev:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -ENODEV;
+#endif
+fail_busy:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EBUSY;
+fail_nodev:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -ENODEV;
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EINVAL;
+}
+
+
+int vcm_deactivate(struct avcm *avcm)
+{
+	unsigned long flags;
+	struct vcm *vcm;
+
+	spin_lock_irqsave(&vcmlock, flags);
+
+	if (!avcm)
+		goto fail;
+
+	vcm = avcm->vcm;
+	if (!vcm) {
+		vcm_err("NULL vcm\n");
+		goto fail;
+	}
+
+	if (!avcm->dev) {
+		vcm_err("cannot deactivate without a device\n");
+		goto fail;
+	}
+
+	if (!avcm->is_active) {
+		vcm_err("double deactivate\n");
+		goto fail_nobusy;
+	}
+
+	if (vcm->type == VCM_DEVICE) {
+#ifdef CONFIG_SMMU
+		/* TODO, pmem check */
+		iommu_detach_device(vcm->domain, avcm->dev);
+#else
+		vcm_err("No SMMU support - cannot activate/deactivate\n");
+		goto fail;
+#endif
+	}
+
+	avcm->is_active = 0;
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return 0;
+fail_nobusy:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -ENOENT;
+fail:
+	spin_unlock_irqrestore(&vcmlock, flags);
+	return -EINVAL;
+}
+
+struct bound *vcm_create_bound(struct vcm *vcm, size_t len)
+{
+	return 0;
+}
+
+
+int vcm_free_bound(struct bound *bound)
+{
+	return -EINVAL;
+}
+
+
+struct res *vcm_reserve_from_bound(struct bound *bound, size_t len,
+				   u32 attr)
+{
+	return 0;
+}
+
+
+size_t vcm_get_bound_start_addr(struct bound *bound)
+{
+	return 0;
+}
+
+
+size_t vcm_get_bound_len(struct bound *bound)
+{
+	return 0;
+}
+
+
+struct physmem *vcm_map_phys_addr(phys_addr_t phys, size_t len)
+{
+	return 0;
+}
+
+
+size_t vcm_get_next_phys_addr(struct physmem *physmem, phys_addr_t phys,
+			      size_t *len)
+{
+	return 0;
+}
+
+
+struct res *vcm_get_res(unsigned long dev_addr, struct vcm *vcm)
+{
+	return 0;
+}
+
+
+size_t vcm_translate(struct device *src_dev, struct vcm *src_vcm,
+		     struct vcm *dst_vcm)
+{
+	return 0;
+}
+
+
+size_t vcm_get_phys_num_res(phys_addr_t phys)
+{
+	return 0;
+}
+
+
+struct res *vcm_get_next_phys_res(phys_addr_t phys, struct res *res,
+				  size_t *len)
+{
+	return 0;
+}
+
+
+phys_addr_t vcm_get_pgtbl_pa(struct vcm *vcm)
+{
+	return 0;
+}
+
+
+/* No lock needed, smmu_translate has its own lock */
+phys_addr_t vcm_dev_addr_to_phys_addr(struct vcm *vcm, unsigned long dev_addr)
+{
+	if (!vcm)
+		return -EINVAL;
+#ifdef CONFIG_SMMU
+	return iommu_iova_to_phys(vcm->domain, dev_addr);
+#else
+	vcm_err("No support for SMMU - manual translation not supported\n");
+	return -ENODEV;
+#endif
+}
+
+
+/* No lock needed, bootmem_cont never changes after init */
+phys_addr_t vcm_get_cont_memtype_pa(enum memtype_t memtype)
+{
+	if (memtype != VCM_MEMTYPE_0) {
+		vcm_err("memtype != VCM_MEMTYPE_0\n");
+		goto fail;
+	}
+
+	if (!bootmem_cont) {
+		vcm_err("bootmem_cont 0\n");
+		goto fail;
+	}
+
+	return (size_t) bootmem_cont;
+fail:
+	return 0;
+}
+
+
+/* No lock needed, constant */
+size_t vcm_get_cont_memtype_len(enum memtype_t memtype)
+{
+	if (memtype != VCM_MEMTYPE_0) {
+		vcm_err("memtype != VCM_MEMTYPE_0\n");
+		return 0;
+	}
+
+	return cont_sz;
+}
+
+int vcm_hook(struct device *dev, vcm_handler handler, void *data)
+{
+#ifdef CONFIG_SMMU
+	vcm_err("No interrupts in IOMMU API\n");
+	return -ENODEV;
+#else
+	vcm_err("No support for SMMU - interrupts not supported\n");
+	return -ENODEV;
+#endif
+}
+
+
+size_t vcm_hw_ver(size_t dev)
+{
+	return 0;
+}
+
+
+static int vcm_cont_phys_chunk_init(void)
+{
+	int i;
+	unsigned long cont_pa;
+
+	if (!cont_phys_chunk) {
+		vcm_err("cont_phys_chunk 0\n");
+		goto fail;
+	}
+
+	if (!bootmem_cont) {
+		vcm_err("bootmem_cont 0\n");
+		goto fail;
+	}
+
+	cont_pa = (size_t) bootmem_cont;
+
+	for (i = 0; i < cont_sz/PAGE_SIZE; ++i) {
+		cont_phys_chunk[i].pa = cont_pa;
+		cont_pa += PAGE_SIZE;
+		cont_phys_chunk[i].size = SZ_4K;
+		/* Not part of an allocator-managed pool */
+		cont_phys_chunk[i].pool_idx = -1;
+		INIT_LIST_HEAD(&cont_phys_chunk[i].allocated);
+	}
+
+	return 0;
+
+fail:
+	return -EINVAL;
+}
+
+int vcm_sys_init(struct physmem_region *mem, int n_regions,
+		 struct vcm_memtype_map *mt_map, int n_mt,
+		 void *cont_pa, unsigned int cont_len)
+{
+	int ret;
+	printk(KERN_INFO "VCM Initialization\n");
+	bootmem_cont = cont_pa;
+	cont_sz = cont_len;
+
+	if (!bootmem_cont) {
+		vcm_err("bootmem_cont is 0\n");
+		ret = -1;
+		goto fail;
+	}
+
+	ret = vcm_setup_tex_classes();
+	if (ret != 0) {
+		printk(KERN_INFO "Could not determine TEX attribute mapping\n");
+		ret = -1;
+		goto fail;
+	}
+
+
+	ret = vcm_alloc_init(mem, n_regions, mt_map, n_mt);
+
+	if (ret != 0) {
+		vcm_err("vcm_alloc_init() ret %i\n", ret);
+		ret = -1;
+		goto fail;
+	}
+
+	cont_phys_chunk = kzalloc(sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE),
+				  GFP_KERNEL);
+	if (!cont_phys_chunk) {
+		vcm_err("kzalloc(%lu, GFP_KERNEL) ret 0",
+			sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE));
+		goto fail_free;
+	}
+
+	/* the address and size will hit our special case unless we
+	   pass an override */
+	cont_vcm_id = vcm_create_flagged(0, (size_t)bootmem_cont, cont_sz);
+	if (cont_vcm_id == 0) {
+		vcm_err("vcm_create_flagged(0, %p, %i) ret 0\n",
+			bootmem_cont, cont_sz);
+		ret = -1;
+		goto fail_free2;
+	}
+
+	ret = vcm_cont_phys_chunk_init();
+	if (ret != 0) {
+		vcm_err("vcm_cont_phys_chunk_init() ret %i\n", ret);
+		goto fail_free3;
+	}
+
+	printk(KERN_INFO "VCM Initialization OK\n");
+	return 0;
+
+fail_free3:
+	ret = __vcm_free(cont_vcm_id);
+	if (ret != 0) {
+		vcm_err("vcm_free(%p) ret %i during failure path\n",
+			(void *) cont_vcm_id, ret);
+		return ret;
+	}
+
+fail_free2:
+	kfree(cont_phys_chunk);
+	cont_phys_chunk = 0;
+
+fail_free:
+	ret = vcm_alloc_destroy();
+	if (ret != 0)
+		vcm_err("vcm_alloc_destroy() ret %i during failure path\n",
+			ret);
+
+	ret = -EINVAL;
+fail:
+	return ret;
+}
+
+
+int vcm_sys_destroy(void)
+{
+	int ret = 0;
+
+	if (!cont_phys_chunk) {
+		vcm_err("cont_phys_chunk is 0\n");
+		return -ENODEV;
+	}
+
+	if (!cont_vcm_id) {
+		vcm_err("cont_vcm_id is 0\n");
+		return -ENODEV;
+	}
+
+	ret = __vcm_free(cont_vcm_id);
+	if (ret != 0) {
+		vcm_err("vcm_free(%p) ret %i\n", (void *) cont_vcm_id, ret);
+		return -ENODEV;
+	}
+
+	cont_vcm_id = 0;
+
+	kfree(cont_phys_chunk);
+	cont_phys_chunk = 0;
+
+	ret = vcm_alloc_destroy();
+	if (ret != 0) {
+		vcm_err("vcm_alloc_destroy() ret %i\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zach Pfeffer <zpfeffer@codeaurora.org>");
diff --git a/arch/arm/mm/vcm_alloc.c b/arch/arm/mm/vcm_alloc.c
new file mode 100644
index 0000000..5f3c024
--- /dev/null
+++ b/arch/arm/mm/vcm_alloc.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/vcm.h>
+#include <linux/vcm_alloc.h>
+#include <linux/string.h>
+#include <asm/sizes.h>
+
+int basicalloc_init;
+
+#define vcm_alloc_err(a, ...)						\
+	pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
+
+struct phys_chunk_head {
+	struct list_head head;
+	int num;
+};
+
+struct phys_pool {
+	int size;
+	int chunk_size;
+	struct phys_chunk_head head;
+};
+
+static int vcm_num_phys_pools;
+static int vcm_num_memtypes;
+static struct phys_pool *vcm_phys_pool;
+static struct vcm_memtype_map *memtype_map;
+
+static int num_pools(enum memtype_t memtype)
+{
+	if (memtype >= vcm_num_memtypes) {
+		vcm_alloc_err("Bad memtype: %d\n", memtype);
+		return -EINVAL;
+	}
+	return memtype_map[memtype].num_pools;
+}
+
+static int pool_chunk_size(enum memtype_t memtype, int prio_idx)
+{
+	int pool_idx;
+	if (memtype >= vcm_num_memtypes) {
+		vcm_alloc_err("Bad memtype: %d\n", memtype);
+		return -EINVAL;
+	}
+
+	if (prio_idx >= num_pools(memtype)) {
+		vcm_alloc_err("Bad prio index: %d, max=%d, mt=%d\n", prio_idx,
+			      num_pools(memtype), memtype);
+		return -EINVAL;
+	}
+
+	pool_idx = memtype_map[memtype].pool_id[prio_idx];
+	return vcm_phys_pool[pool_idx].chunk_size;
+}
+
+int vcm_alloc_pool_idx_to_size(int pool_idx)
+{
+	if (pool_idx >= vcm_num_phys_pools) {
+		vcm_alloc_err("Bad pool index: %d\n, max=%d\n", pool_idx,
+			      vcm_num_phys_pools);
+		return -EINVAL;
+	}
+	return vcm_phys_pool[pool_idx].chunk_size;
+}
+
+static struct phys_chunk_head *get_chunk_list(enum memtype_t memtype,
+					      int prio_idx)
+{
+	unsigned int pool_idx;
+
+	if (memtype >= vcm_num_memtypes) {
+		vcm_alloc_err("Bad memtype: %d\n", memtype);
+		return NULL;
+	}
+
+	if (prio_idx >= num_pools(memtype)) {
+		vcm_alloc_err("bad chunk size: mt=%d, prioidx=%d, np=%d\n",
+			      memtype, prio_idx, num_pools(memtype));
+		BUG();
+		return NULL;
+	}
+
+	if (!vcm_phys_pool) {
+		vcm_alloc_err("phys_pool is null\n");
+		return NULL;
+	}
+
+	/* We don't have a "pool count" anywhere but this is coming
+	 * strictly from data in a board file
+	 */
+	pool_idx = memtype_map[memtype].pool_id[prio_idx];
+
+	return &vcm_phys_pool[pool_idx].head;
+}
+
+static int is_allocated(struct list_head *allocated)
+{
+	/* This should not happen under normal conditions */
+	if (!allocated) {
+		vcm_alloc_err("no allocated\n");
+		return 0;
+	}
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return 0;
+	}
+	return !list_empty(allocated);
+}
+
+static int count_allocated_size(enum memtype_t memtype, int idx)
+{
+	int cnt = 0;
+	struct phys_chunk *chunk, *tmp;
+	struct phys_chunk_head *pch;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return 0;
+	}
+
+	pch = get_chunk_list(memtype, idx);
+	if (!pch) {
+		vcm_alloc_err("null pch\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(chunk, tmp, &pch->head, list) {
+		if (is_allocated(&chunk->allocated))
+			cnt++;
+	}
+
+	return cnt;
+}
+
+
+int vcm_alloc_get_mem_size(void)
+{
+	if (!vcm_phys_pool) {
+		vcm_alloc_err("No physical pool set up!\n");
+		return -ENODEV;
+	}
+	return vcm_phys_pool[0].size;
+}
+EXPORT_SYMBOL(vcm_alloc_get_mem_size);
+
+void vcm_alloc_print_list(enum memtype_t memtype, int just_allocated)
+{
+	int i;
+	struct phys_chunk *chunk, *tmp;
+	struct phys_chunk_head *pch;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return;
+	}
+
+	for (i = 0; i < num_pools(memtype); ++i) {
+		pch = get_chunk_list(memtype, i);
+
+		if (!pch) {
+			vcm_alloc_err("pch is null\n");
+			return;
+		}
+
+		if (list_empty(&pch->head))
+			continue;
+
+		list_for_each_entry_safe(chunk, tmp, &pch->head, list) {
+			if (just_allocated && !is_allocated(&chunk->allocated))
+				continue;
+
+			printk(KERN_INFO "pa = %#x, size = %#x\n",
+			chunk->pa, vcm_phys_pool[chunk->pool_idx].chunk_size);
+		}
+	}
+}
+EXPORT_SYMBOL(vcm_alloc_print_list);
+
+int vcm_alloc_blocks_avail(enum memtype_t memtype, int idx)
+{
+	struct phys_chunk_head *pch;
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return 0;
+	}
+	pch = get_chunk_list(memtype, idx);
+
+	if (!pch) {
+		vcm_alloc_err("pch is null\n");
+		return 0;
+	}
+	return pch->num;
+}
+EXPORT_SYMBOL(vcm_alloc_blocks_avail);
+
+
+int vcm_alloc_get_num_chunks(enum memtype_t memtype)
+{
+	return num_pools(memtype);
+}
+EXPORT_SYMBOL(vcm_alloc_get_num_chunks);
+
+
+int vcm_alloc_all_blocks_avail(enum memtarget_t memtype)
+{
+	int i;
+	int cnt = 0;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return 0;
+	}
+
+	for (i = 0; i < num_pools(memtype); ++i)
+		cnt += vcm_alloc_blocks_avail(memtype, i);
+	return cnt;
+}
+EXPORT_SYMBOL(vcm_alloc_all_blocks_avail);
+
+
+int vcm_alloc_count_allocated(enum memtype_t memtype)
+{
+	int i;
+	int cnt = 0;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return 0;
+	}
+
+	for (i = 0; i < num_pools(memtype); ++i)
+		cnt += count_allocated_size(memtype, i);
+	return cnt;
+}
+EXPORT_SYMBOL(vcm_alloc_count_allocated);
+
+int vcm_alloc_destroy(void)
+{
+	int i, mt;
+	struct phys_chunk *chunk, *tmp;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		return -ENODEV;
+	}
+
+	/* can't destroy a space that has allocations */
+	for (mt = 0; mt < vcm_num_memtypes; mt++)
+		if (vcm_alloc_count_allocated(mt)) {
+			vcm_alloc_err("allocations still present\n");
+			return -EBUSY;
+		}
+
+	for (i = 0; i < vcm_num_phys_pools; i++) {
+		struct phys_chunk_head *pch = &vcm_phys_pool[i].head;
+
+		if (list_empty(&pch->head))
+			continue;
+		list_for_each_entry_safe(chunk, tmp, &pch->head, list) {
+			list_del(&chunk->list);
+			memset(chunk, 0, sizeof(*chunk));
+			kfree(chunk);
+		}
+		vcm_phys_pool[i].head.num = 0;
+	}
+
+	kfree(vcm_phys_pool);
+	kfree(memtype_map);
+
+	vcm_phys_pool = NULL;
+	memtype_map = NULL;
+	basicalloc_init = 0;
+	vcm_num_phys_pools = 0;
+	return 0;
+}
+EXPORT_SYMBOL(vcm_alloc_destroy);
+
+
+int vcm_alloc_init(struct physmem_region *mem, int n_regions,
+		   struct vcm_memtype_map *mt_map, int n_mt)
+{
+	int i = 0, j = 0, r = 0, num_chunks;
+	struct phys_chunk *chunk;
+	struct phys_chunk_head *pch = NULL;
+	unsigned long pa;
+
+	/* no double inits */
+	if (basicalloc_init) {
+		vcm_alloc_err("double basicalloc_init\n");
+		BUG();
+		goto fail;
+	}
+	memtype_map = kzalloc(sizeof(*mt_map) * n_mt, GFP_KERNEL);
+	if (!memtype_map) {
+		vcm_alloc_err("Could not copy memtype map\n");
+		goto fail;
+	}
+	memcpy(memtype_map, mt_map, sizeof(*mt_map) * n_mt);
+
+	vcm_phys_pool = kzalloc(sizeof(*vcm_phys_pool) * n_regions, GFP_KERNEL);
+	vcm_num_phys_pools = n_regions;
+	vcm_num_memtypes = n_mt;
+
+	if (!vcm_phys_pool) {
+		vcm_alloc_err("Could not allocate physical pool structure\n");
+		goto fail;
+	}
+
+	/* separate out to ensure good cleanup */
+	for (i = 0; i < n_regions; i++) {
+		pch = &vcm_phys_pool[i].head;
+		INIT_LIST_HEAD(&pch->head);
+		pch->num = 0;
+	}
+
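+	/* Carve each region into chunk_size pieces and thread them onto
+	 * that region's free list.
+	 */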
+	for (r = 0; r < n_regions; r++) {
+		pa = mem[r].addr;
+		vcm_phys_pool[r].size = mem[r].size;
+		vcm_phys_pool[r].chunk_size = mem[r].chunk_size;
+		pch = &vcm_phys_pool[r].head;
+
+		num_chunks = mem[r].size / mem[r].chunk_size;
+
+		printk(KERN_INFO "VCM Init: region %d, chunk size=%d, "
+		       "num=%d, pa=%p\n", r, mem[r].chunk_size, num_chunks,
+		       (void *)pa);
+
+		for (j = 0; j < num_chunks; ++j) {
+			chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+			if (!chunk) {
+				vcm_alloc_err("null chunk\n");
+				goto fail;
+			}
+			chunk->pa = pa;
+			chunk->size = mem[r].chunk_size;
+			pa += mem[r].chunk_size;
+			chunk->pool_idx = r;
+			INIT_LIST_HEAD(&chunk->allocated);
+			list_add_tail(&chunk->list, &pch->head);
+			pch->num++;
+		}
+	}
+
+	basicalloc_init = 1;
+	return 0;
+fail:
+	vcm_alloc_destroy();
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vcm_alloc_init);
+
+
+int vcm_alloc_free_blocks(enum memtype_t memtype, struct phys_chunk *alloc_head)
+{
+	struct phys_chunk *chunk, *tmp;
+	struct phys_chunk_head *pch = NULL;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		goto fail;
+	}
+
+	if (!alloc_head) {
+		vcm_alloc_err("no alloc_head\n");
+		goto fail;
+	}
+
+	list_for_each_entry_safe(chunk, tmp, &alloc_head->allocated,
+				 allocated) {
+		list_del_init(&chunk->allocated);
+		pch = &vcm_phys_pool[chunk->pool_idx].head;
+
+		if (!pch) {
+			vcm_alloc_err("null pch\n");
+			goto fail;
+		}
+		pch->num++;
+	}
+
+	return 0;
+fail:
+	return -ENODEV;
+}
+EXPORT_SYMBOL(vcm_alloc_free_blocks);
+
+
+int vcm_alloc_num_blocks(int num, enum memtype_t memtype, int idx,
+			 struct phys_chunk *alloc_head)
+{
+	struct phys_chunk *chunk;
+	struct phys_chunk_head *pch = NULL;
+	int num_allocated = 0;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("no basicalloc_init\n");
+		goto fail;
+	}
+
+	if (!alloc_head) {
+		vcm_alloc_err("no alloc_head\n");
+		goto fail;
+	}
+
+	pch = get_chunk_list(memtype, idx);
+
+	if (!pch) {
+		vcm_alloc_err("null pch\n");
+		goto fail;
+	}
+	if (list_empty(&pch->head)) {
+		vcm_alloc_err("list is empty\n");
+		goto fail;
+	}
+
+	if (vcm_alloc_blocks_avail(memtype, idx) < num) {
+		vcm_alloc_err("not enough blocks? num=%d\n", num);
+		goto fail;
+	}
+
+	list_for_each_entry(chunk, &pch->head, list) {
+		if (num_allocated == num)
+			break;
+		if (is_allocated(&chunk->allocated))
+			continue;
+
+		list_add_tail(&chunk->allocated, &alloc_head->allocated);
+		pch->num--;
+		num_allocated++;
+	}
+	return num_allocated;
+fail:
+	return 0;
+}
+EXPORT_SYMBOL(vcm_alloc_num_blocks);
+
+
+int vcm_alloc_max_munch(int len, enum memtype_t memtype,
+			struct phys_chunk *alloc_head)
+{
+	int i;
+
+	int blocks_req = 0;
+	int block_residual = 0;
+	int blocks_allocated = 0;
+	int cur_chunk_size = 0;
+	int ba = 0;
+
+	if (!basicalloc_init) {
+		vcm_alloc_err("basicalloc_init is 0\n");
+		goto fail;
+	}
+
+	if (!alloc_head) {
+		vcm_alloc_err("alloc_head is NULL\n");
+		goto fail;
+	}
+
+	if (num_pools(memtype) <= 0) {
+		vcm_alloc_err("Memtype %d has improper mempool configuration\n",
+			      memtype);
+		goto fail;
+	}
+
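+	/*
+	 * "Max munch": walk the memtype's pools in priority order (expected
+	 * to go from larger to smaller chunk sizes), taking as many whole
+	 * chunks as are available at each size and pushing any shortfall
+	 * down to the smaller pools. Any sub-chunk remainder is covered by
+	 * one extra chunk from the last (smallest) pool.
+	 */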
+	for (i = 0; i < num_pools(memtype); ++i) {
+		cur_chunk_size = pool_chunk_size(memtype, i);
+		if (cur_chunk_size <= 0) {
+			vcm_alloc_err("Bad chunk size: %d\n", cur_chunk_size);
+			goto fail;
+		}
+
+		blocks_req = len / cur_chunk_size;
+		block_residual = len % cur_chunk_size;
+
+		len = block_residual; /* len left */
+		if (blocks_req) {
+			int blocks_available = 0;
+			int blocks_diff = 0;
+			int bytes_diff = 0;
+
+			blocks_available = vcm_alloc_blocks_avail(memtype, i);
+			if (blocks_available < blocks_req) {
+				blocks_diff =
+					(blocks_req - blocks_available);
+				bytes_diff =
+					blocks_diff * cur_chunk_size;
+
+				/* add back in the rest */
+				len += bytes_diff;
+			} else {
+				/* got all the blocks I need */
+				blocks_available =
+					(blocks_available > blocks_req)
+					? blocks_req : blocks_available;
+			}
+
+			ba = vcm_alloc_num_blocks(blocks_available, memtype, i,
+						  alloc_head);
+
+			if (ba != blocks_available) {
+				vcm_alloc_err("blocks allocated (%i) !="
+					      " blocks_available (%i):"
+					      " chunk size = %#x,"
+					      " alloc_head = %p\n",
+					      ba, blocks_available,
+					      i, (void *) alloc_head);
+				goto fail;
+			}
+			blocks_allocated += blocks_available;
+		}
+	}
+
+	if (len) {
+		int blocks_available = 0;
+		int last_sz = num_pools(memtype) - 1;
+		blocks_available = vcm_alloc_blocks_avail(memtype, last_sz);
+
+		if (blocks_available > 0) {
+			ba = vcm_alloc_num_blocks(1, memtype, last_sz,
+						  alloc_head);
+			if (ba != 1) {
+				vcm_alloc_err("blocks allocated (%i) !="
+					      " blocks_available (%i):"
+					      " chunk size = %#x,"
+					      " alloc_head = %p\n",
+					      ba, 1,
+					      last_sz,
+					      (void *) alloc_head);
+				goto fail;
+			}
+			blocks_allocated += 1;
+		} else {
+			vcm_alloc_err("blocks_available (%#x) <= 1\n",
+				      blocks_available);
+			goto fail;
+		}
+	}
+
+	return blocks_allocated;
+fail:
+	vcm_alloc_free_blocks(memtype, alloc_head);
+	return 0;
+}
+EXPORT_SYMBOL(vcm_alloc_max_munch);
diff --git a/arch/arm/mm/vcm_mm.c b/arch/arm/mm/vcm_mm.c
new file mode 100644
index 0000000..dee51fa
--- /dev/null
+++ b/arch/arm/mm/vcm_mm.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Architecture-specific VCM functions */
+
+#include <linux/kernel.h>
+#include <linux/vcm_mm.h>
+
+#include <asm/pgtable-hwdef.h>
+#include <asm/tlbflush.h>
+
+#define MRC(reg, processor, op1, crn, crm, op2)				\
+__asm__ __volatile__ (							\
+"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 " \n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)
+
+
+/* Local type attributes (not the same as VCM) */
+#define ARM_MT_NORMAL		2
+#define ARM_MT_STRONGLYORDERED	0
+#define ARM_MT_DEVICE		1
+
+#define ARM_CP_NONCACHED	0
+#define ARM_CP_WB_WA		1
+#define ARM_CP_WB_NWA		3
+#define ARM_CP_WT_NWA		2
+
+#define smmu_err(a, ...)						\
+	pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
+
+#define FL_OFFSET(va)	(((va) & 0xFFF00000) >> 20)
+#define SL_OFFSET(va)	(((va) & 0xFF000) >> 12)
+
+int vcm_driver_tex_class[4];
+
+static int find_tex_class(int icp, int ocp, int mt, int nos)
+{
+	int i = 0;
+	unsigned int prrr = 0;
+	unsigned int nmrr = 0;
+	int c_icp, c_ocp, c_mt, c_nos;
+
+	RCP15_PRRR(prrr);
+	RCP15_NMRR(nmrr);
+
+	/* There are only 8 classes on this architecture */
+	/* If they add more classes, registers will VASTLY change */
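+	/* Per the ARMv7 TEX-remap register layout: PRRR holds the memory
+	 * type for class i in bits [2i+1:2i] and the NOSn bit at bit
+	 * (24 + i); NMRR holds the inner policy in bits [2i+1:2i] and the
+	 * outer policy in bits [2i+17:2i+16].
+	 */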
+	for (i = 0; i < 8; i++)	{
+		c_nos = prrr & (1 << (i + 24)) ? 1 : 0;
+		c_mt = (prrr & (3 << (i * 2))) >> (i * 2);
+		c_icp = (nmrr & (3 << (i * 2))) >> (i * 2);
+		c_ocp = (nmrr & (3 << (i * 2 + 16))) >> (i * 2 + 16);
+
+		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+			return i;
+	}
+	smmu_err("Could not find TEX class for ICP=%d, OCP=%d, MT=%d, NOS=%d\n",
+		 icp, ocp, mt, nos);
+
+	/* In reality, we may want to remove this panic. Some classes just */
+	/* will not be available, and will fail in smmu_set_attr */
+	panic("SMMU: Could not determine TEX attribute mapping.\n");
+	return -1;
+}
+
+
+int vcm_setup_tex_classes(void)
+{
+	unsigned int cpu_prrr;
+	unsigned int cpu_nmrr;
+
+	if (!(get_cr() & CR_TRE))	/* No TRE? */
+		panic("TEX remap not enabled, but the SMMU driver needs it!\n");
+
+	RCP15_PRRR(cpu_prrr);
+	RCP15_NMRR(cpu_nmrr);
+
+	vcm_driver_tex_class[VCM_DEV_ATTR_NONCACHED] =
+		find_tex_class(ARM_CP_NONCACHED, ARM_CP_NONCACHED,
+			       ARM_MT_NORMAL, 1);
+
+	vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_WA] =
+		find_tex_class(ARM_CP_WB_WA, ARM_CP_WB_WA,
+			       ARM_MT_NORMAL, 1);
+
+	vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_NWA] =
+		find_tex_class(ARM_CP_WB_NWA, ARM_CP_WB_NWA,
+			       ARM_MT_NORMAL, 1);
+
+	vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WT] =
+		find_tex_class(ARM_CP_WT_NWA, ARM_CP_WT_NWA,
+			       ARM_MT_NORMAL, 1);
+#ifdef DEBUG_TEX
+	printk(KERN_INFO "VCM driver debug: Using TEX classes: %d %d %d %d\n",
+	       vcm_driver_tex_class[VCM_DEV_ATTR_NONCACHED],
+	       vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_WA],
+	       vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_NWA],
+	       vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WT]);
+#endif
+	return 0;
+}
+
+
+int set_arm7_pte_attr(unsigned long pt_base, unsigned long va,
+					unsigned long len, unsigned int attr)
+{
+	unsigned long *fl_table = NULL;
+	unsigned long *fl_pte = NULL;
+	unsigned long fl_offset = 0;
+	unsigned long *sl_table = NULL;
+	unsigned long *sl_pte = NULL;
+	unsigned long sl_offset = 0;
+	int i;
+	int sh = 0;
+	int class = 0;
+
+	/* Alignment */
+	if (va & (len-1)) {
+		smmu_err("misaligned va: %p\n", (void *) va);
+		goto fail;
+	}
+	if (attr > 7) {
+		smmu_err("bad attribute: %d\n", attr);
+		goto fail;
+	}
+
+	sh = (attr & VCM_DEV_ATTR_SH) ? 1 : 0;
+	class = vcm_driver_tex_class[attr & 0x03];
+
+	if (class > 7 || class < 0) {	/* Bad class */
+		smmu_err("bad tex class: %d\n", class);
+		goto fail;
+	}
+
+	if (len != SZ_16M && len != SZ_1M &&
+	    len != SZ_64K && len != SZ_4K) {
+		smmu_err("bad size: %lu\n", len);
+		goto fail;
+	}
+
+	fl_table = (unsigned long *) pt_base;
+
+	if (!fl_table) {
+		smmu_err("null page table\n");
+		goto fail;
+	}
+
+	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
+	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	if (*fl_pte == 0) {	/* Nothing there! */
+		smmu_err("first level pte is 0\n");
+		goto fail;
+	}
+
+	/* Supersection attributes */
+	if (len == SZ_16M) {
+		for (i = 0; i < 16; i++) {
+			/* Clear the old bits */
+			*(fl_pte+i) &= ~(PMD_SECT_S | PMD_SECT_CACHEABLE |
+					 PMD_SECT_BUFFERABLE | PMD_SECT_TEX(1));
+
+			/* Assign new class and S bit */
+			*(fl_pte+i) |= sh ? PMD_SECT_S : 0;
+			*(fl_pte+i) |= class & 0x01 ? PMD_SECT_BUFFERABLE : 0;
+			*(fl_pte+i) |= class & 0x02 ? PMD_SECT_CACHEABLE : 0;
+			*(fl_pte+i) |= class & 0x04 ? PMD_SECT_TEX(1) : 0;
+		}
+	} else if (len == SZ_1M) {
+
+		/* Clear the old bits */
+		*(fl_pte) &= ~(PMD_SECT_S | PMD_SECT_CACHEABLE |
+			       PMD_SECT_BUFFERABLE | PMD_SECT_TEX(1));
+
+		/* Assign new class and S bit */
+		*(fl_pte) |= sh ? PMD_SECT_S : 0;
+		*(fl_pte) |= class & 0x01 ? PMD_SECT_BUFFERABLE : 0;
+		*(fl_pte) |= class & 0x02 ? PMD_SECT_CACHEABLE : 0;
+		*(fl_pte) |= class & 0x04 ? PMD_SECT_TEX(1) : 0;
+	}
+
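+	/* For 64K/4K mappings the first-level entry points to a second-level
+	 * table; the pointer computed here is only dereferenced in those
+	 * cases.
+	 */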
+	sl_table = (unsigned long *) __va(((*fl_pte) & 0xFFFFFC00));
+	sl_offset = SL_OFFSET(va);
+	sl_pte = sl_table + sl_offset;
+
+	if (len == SZ_64K) {
+		for (i = 0; i < 16; i++) {
+			/* Clear the old bits */
+			*(sl_pte+i) &= ~(PTE_EXT_SHARED | PTE_CACHEABLE |
+					 PTE_BUFFERABLE | PTE_EXT_TEX(1));
+
+			/* Assign new class and S bit */
+			*(sl_pte+i) |= sh ? PTE_EXT_SHARED : 0;
+			*(sl_pte+i) |= class & 0x01 ? PTE_BUFFERABLE : 0;
+			*(sl_pte+i) |= class & 0x02 ? PTE_CACHEABLE : 0;
+			*(sl_pte+i) |= class & 0x04 ? PTE_EXT_TEX(1) : 0;
+		}
+	} else if (len == SZ_4K) {
+		/* Clear the old bits */
+		*(sl_pte) &= ~(PTE_EXT_SHARED | PTE_CACHEABLE |
+			       PTE_BUFFERABLE | PTE_EXT_TEX(1));
+
+		/* Assign new class and S bit */
+		*(sl_pte) |= sh ? PTE_EXT_SHARED : 0;
+		*(sl_pte) |= class & 0x01 ? PTE_BUFFERABLE : 0;
+		*(sl_pte) |= class & 0x02 ? PTE_CACHEABLE : 0;
+		*(sl_pte) |= class & 0x04 ? PTE_EXT_TEX(1) : 0;
+	}
+
+
+	mb();
+	return 0;
+fail:
+	return 1;
+}
+
+
+int cpu_set_attr(unsigned long va, unsigned long len, unsigned int attr)
+{
+	int ret;
+	pgd_t *pgd = init_mm.pgd;
+
+	if (!pgd) {
+		smmu_err("null pgd\n");
+		goto fail;
+	}
+
+	ret = set_arm7_pte_attr((unsigned long)pgd, va, len, attr);
+
+	if (ret != 0) {
+		smmu_err("could not set attribute: \
+					pgd=%p, va=%p, len=%lu, attr=%d\n",
+			 (void *) pgd, (void *) va, len, attr);
+		goto fail;
+	}
+	dmb();
+	flush_tlb_all();
+	return 0;
+fail:
+	return -1;
+}
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index a631016..73e82f6 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -46,8 +46,8 @@
 	struct arm_vmregion *c, *new;
 
 	if (head->vm_end - head->vm_start < size) {
-		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
-			__func__, size);
+		printk(KERN_WARNING "%s: allocation too big (requested %#x, end:%lx, start:%lx)\n",
+			__func__, size, head->vm_end, head->vm_start);
 		goto out;
 	}