Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4
AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.
* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
PRNG: Device tree entry for qrng device.
vidc:1080p: Set video core timeout value for Thumbnail mode
msm: sps: improve the debugging support in SPS driver
board-8064 msm: Overlap secure and non secure video firmware heaps.
msm: clock: Add handoff ops for 7x30 and copper XO clocks
msm_fb: display: Wait for external vsync before DTV IOMMU unmap
msm: Fix circular dependency in debug UART settings
msm: gdsc: Add GDSC regulator driver for msm-copper
defconfig: Enable Mobicore Driver.
mobicore: Add mobicore driver.
mobicore: rename variable to lower case.
mobicore: rename folder.
mobicore: add makefiles
mobicore: initial import of kernel driver
ASoC: msm: Add SLIMBUS_2_RX CPU DAI
board-8064-gpio: Update FUNC for EPM SPI CS
msm_fb: display: Remove chicken bit config during video playback
mmc: msm_sdcc: enable the sanitize capability
msm-fb: display: lm2 writeback support on mpq platforms
msm_fb: display: Disable LVDS phy & pll during panel off
...
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 137858b..e6b733b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -28,6 +28,7 @@
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/mmu_writeable.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -52,6 +53,9 @@
#define CPOLICY_WRITEBACK 3
#define CPOLICY_WRITEALLOC 4
+#define RX_AREA_START _text
+#define RX_AREA_END __start_rodata
+
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
@@ -257,6 +261,18 @@
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_R] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RW] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RX] = {
+ .prot_sect = PMD_TYPE_SECT,
+ .domain = DOMAIN_KERNEL,
+ },
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
@@ -442,6 +458,8 @@
* from SVC mode and no access from userspace.
*/
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif
@@ -461,6 +479,9 @@
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
}
@@ -513,6 +534,9 @@
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+ mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@@ -576,6 +600,7 @@
BUG_ON(pmd_bad(*pmd));
}
+#ifdef CONFIG_HIGHMEM
static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
unsigned long addr, unsigned long prot)
{
@@ -586,6 +611,7 @@
BUG_ON(pmd_bad(*pmd));
return pte_offset_kernel(pmd, addr);
}
+#endif
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
@@ -842,6 +868,14 @@
{
int i, j, highmem = 0;
+#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
+ find_membank0_hole();
+#endif
+
+#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
+ if (movable_reserved_size && __pa(vmalloc_min) > movable_reserved_start)
+ vmalloc_min = __va(movable_reserved_start);
+#endif
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
@@ -1105,6 +1139,105 @@
#endif
}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+/*
+ * Bookkeeping for one in-flight "make kernel text temporarily writeable"
+ * operation.  A single static instance is sufficient because all users
+ * must hold mem_text_writeable_lock across the writeable/restore pair.
+ */
+static struct {
+	pmd_t *pmd_to_flush;	/* descriptor actually flushed (first of the pair) */
+	pmd_t *pmd;		/* descriptor that was modified */
+	unsigned long addr;	/* page-aligned address that was unprotected */
+	pmd_t saved_pmd;	/* original descriptor value, restored afterwards */
+	bool made_writeable;	/* set only if the APX bit was actually cleared */
+} mem_unprotect;
+
+/* Serializes all text-patching through the writeable/restore pair below. */
+static DEFINE_SPINLOCK(mem_text_writeable_lock);
+
+/*
+ * Acquire the text-patching lock, saving IRQ state into *flags.
+ * Exported as a function so out-of-tree users (kprobes etc.) need not
+ * see the lock itself.
+ */
+void mem_text_writeable_spinlock(unsigned long *flags)
+{
+	spin_lock_irqsave(&mem_text_writeable_lock, *flags);
+}
+
+/* Release the text-patching lock, restoring the IRQ state saved in *flags. */
+void mem_text_writeable_spinunlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
+}
+
+/*
+ * mem_text_address_writeable() and mem_text_address_restore()
+ * should be called as a pair. They are used to make the
+ * specified address in the kernel text section temporarily writeable
+ * when it has been marked read-only by STRICT_MEMORY_RWX.
+ * Used by kprobes and other debugging tools to set breakpoints etc.
+ * mem_text_address_writeable() is invoked before writing.
+ * After the write, mem_text_address_restore() must be called
+ * to restore the original state.
+ * This is only effective when used on the kernel text section
+ * marked as MEMORY_RX by map_lowmem()
+ *
+ * They must each be called with mem_text_writeable_lock locked
+ * by the caller, with no unlocking between the calls.
+ * The caller should release mem_text_writeable_lock immediately
+ * after the call to mem_text_address_restore().
+ * Only the write and associated cache operations should be performed
+ * between the calls.
+ */
+
+/* this function must be called with mem_text_writeable_lock held */
+void mem_text_address_writeable(unsigned long addr)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->active_mm;
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud = pud_offset(pgd, addr);
+
+	/* Default: restore becomes a no-op unless we actually change a pmd. */
+	mem_unprotect.made_writeable = 0;
+
+	/* Only addresses inside the RX-mapped kernel text region qualify. */
+	if ((addr < (unsigned long)RX_AREA_START) ||
+	    (addr >= (unsigned long)RX_AREA_END))
+		return;
+
+	mem_unprotect.pmd = pmd_offset(pud, addr);
+	mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
+	mem_unprotect.addr = addr & PAGE_MASK;
+
+	/*
+	 * NOTE(review): on classic ARM 2-level tables one Linux pmd entry
+	 * covers two 1MB hardware section descriptors; step to the second
+	 * descriptor when addr falls in the upper section — confirm this
+	 * matches the pgtable layout for this kernel config.
+	 */
+	if (addr & SECTION_SIZE)
+		mem_unprotect.pmd++;
+
+	/* Save the original descriptor so restore can put it back verbatim. */
+	mem_unprotect.saved_pmd = *mem_unprotect.pmd;
+	/* Bail out unless this is a section mapping; made_writeable stays 0. */
+	if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
+		return;
+
+	/* Clearing APX downgrades the section from kernel-RO to kernel-RW. */
+	*mem_unprotect.pmd &= ~PMD_SECT_APX;
+
+	/* Push the modified descriptor out and drop the stale TLB entry. */
+	flush_pmd_entry(mem_unprotect.pmd_to_flush);
+	flush_tlb_kernel_page(mem_unprotect.addr);
+	mem_unprotect.made_writeable = 1;
+}
+
+/* this function must be called with mem_text_writeable_lock held */
+void mem_text_address_restore(void)
+{
+	/* No-op unless the matching writeable() call actually cleared APX. */
+	if (mem_unprotect.made_writeable) {
+		/* Reinstate the saved descriptor, then flush pmd and TLB again. */
+		*mem_unprotect.pmd = mem_unprotect.saved_pmd;
+		flush_pmd_entry(mem_unprotect.pmd_to_flush);
+		flush_tlb_kernel_page(mem_unprotect.addr);
+	}
+}
+#endif
+
+/*
+ * Patch a single word of (possibly read-only) kernel text.
+ * Takes the text-patching lock, temporarily unprotects the containing
+ * section, performs the store, then syncs the I-cache with the D-cache
+ * before re-protecting.  Safe to call on addresses outside the RX text
+ * region too: writeable()/restore() degrade to no-ops there and the
+ * plain store proceeds against the already-writeable mapping.
+ */
+void mem_text_write_kernel_word(unsigned long *addr, unsigned long word)
+{
+	unsigned long flags;
+
+	mem_text_writeable_spinlock(&flags);
+	mem_text_address_writeable((unsigned long)addr);
+	*addr = word;
+	/* Ensure the instruction cache observes the freshly written word. */
+	flush_icache_range((unsigned long)addr,
+						((unsigned long)addr + sizeof(long)));
+	mem_text_address_restore();
+	mem_text_writeable_spinunlock(&flags);
+}
+EXPORT_SYMBOL(mem_text_write_kernel_word);
+
+extern char __init_data[];
static void __init map_lowmem(void)
{
@@ -1125,8 +1258,46 @@
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
+#ifdef CONFIG_STRICT_MEMORY_RWX
+ if (start <= __pa(_text) && __pa(_text) < end) {
+ map.length = SECTION_SIZE;
+ map.type = MT_MEMORY;
+
+ create_mapping(&map, false);
+
+ map.pfn = __phys_to_pfn(start + SECTION_SIZE);
+ map.virtual = __phys_to_virt(start + SECTION_SIZE);
+ map.length = (unsigned long)RX_AREA_END - map.virtual;
+ map.type = MT_MEMORY_RX;
+
+ create_mapping(&map, false);
+
+ map.pfn = __phys_to_pfn(__pa(__start_rodata));
+ map.virtual = (unsigned long)__start_rodata;
+ map.length = __init_begin - __start_rodata;
+ map.type = MT_MEMORY_R;
+
+ create_mapping(&map, false);
+
+ map.pfn = __phys_to_pfn(__pa(__init_begin));
+ map.virtual = (unsigned long)__init_begin;
+ map.length = __init_data - __init_begin;
+ map.type = MT_MEMORY;
+
+ create_mapping(&map, false);
+
+ map.pfn = __phys_to_pfn(__pa(__init_data));
+ map.virtual = (unsigned long)__init_data;
+ map.length = __phys_to_virt(end) - (unsigned int)__init_data;
+ map.type = MT_MEMORY_RW;
+ } else {
+ map.length = end - start;
+ map.type = MT_MEMORY_RW;
+ }
+#else
map.length = end - start;
map.type = MT_MEMORY;
+#endif
create_mapping(&map, false);
}