arm: mm: add functions to temporarily allow write to kernel text
STRICT_MEMORY_RWX write-protects the kernel text section. This
is a problem for tools such as kprobes, which need write access
to kernel text space.
This patch introduces a function to temporarily make part of the
kernel text space writeable and another to restore the original state.
They can be called by code which is intentionally writing to
this space, while still leaving the kernel protected from
unintentional writes at other times.
Change-Id: I879009c41771198852952e5e7c3b4d1368f12d5f
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aeaa173..9beef12 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -25,6 +25,7 @@
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/traps.h>
+#include <asm/mmu_writeable.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -49,6 +50,9 @@
#define CPOLICY_WRITEBACK 3
#define CPOLICY_WRITEALLOC 4
+#define RX_AREA_START _text
+#define RX_AREA_END __start_rodata
+
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
@@ -1041,6 +1045,90 @@
#endif
}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+/*
+ * Saved state for a single in-progress "make text writeable" operation.
+ * Guarded by mem_text_writeable_lock; only one unprotect may be
+ * outstanding at a time.
+ */
+static struct {
+ pmd_t *pmd_to_flush; /* first-level entry handed to flush_pmd_entry() */
+ pmd_t *pmd; /* hardware section entry actually modified */
+ unsigned long addr; /* page-aligned virtual address being unprotected */
+ pmd_t saved_pmd; /* original entry, restored by mem_text_address_restore() */
+ bool made_writeable; /* true only if protection was actually dropped */
+} mem_unprotect;
+
+static DEFINE_SPINLOCK(mem_text_writeable_lock);
+
+/*
+ * Acquire mem_text_writeable_lock with local IRQs disabled; the previous
+ * IRQ state is saved in *flags for the matching spinunlock call.
+ */
+void mem_text_writeable_spinlock(unsigned long *flags)
+{
+ spin_lock_irqsave(&mem_text_writeable_lock, *flags);
+}
+
+/*
+ * Release mem_text_writeable_lock and restore the IRQ state that was
+ * saved in *flags by mem_text_writeable_spinlock().
+ */
+void mem_text_writeable_spinunlock(unsigned long *flags)
+{
+ spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
+}
+
+/*
+ * mem_text_address_writeable() and mem_text_address_restore()
+ * should be called as a pair. They are used to make the
+ * specified address in the kernel text section temporarily writeable
+ * when it has been marked read-only by STRICT_MEMORY_RWX.
+ * Used by kprobes and other debugging tools to set breakpoints etc.
+ * mem_text_address_writeable() is invoked before writing.
+ * After the write, mem_text_address_restore() must be called
+ * to restore the original state.
+ * This is only effective when used on the kernel text section
+ * marked as MEMORY_RX by map_lowmem()
+ *
+ * They must each be called with mem_text_writeable_lock locked
+ * by the caller, with no unlocking between the calls.
+ * The caller should release mem_text_writeable_lock immediately
+ * after the call to mem_text_address_restore().
+ * Only the write and associated cache operations should be performed
+ * between the calls.
+ */
+
+/*
+ * Temporarily make the kernel-text section containing @addr writeable.
+ * Saves the original first-level entry in mem_unprotect so that
+ * mem_text_address_restore() can undo the change.
+ *
+ * This function must be called with mem_text_writeable_lock held.
+ */
+void mem_text_address_writeable(unsigned long addr)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->active_mm;
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud = pud_offset(pgd, addr);
+
+	mem_unprotect.made_writeable = false;
+
+	/* Only addresses inside the RX-mapped kernel text are handled. */
+	if ((addr < (unsigned long)RX_AREA_START) ||
+	    (addr >= (unsigned long)RX_AREA_END))
+		return;
+
+	mem_unprotect.pmd = pmd_offset(pud, addr);
+	mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
+	mem_unprotect.addr = addr & PAGE_MASK;
+
+	/*
+	 * On classic (2-level) ARM a pmd_t covers a pair of 1MB section
+	 * entries; step to the second hardware entry when @addr falls in
+	 * the odd section of the pair.
+	 */
+	if (addr & SECTION_SIZE)
+		mem_unprotect.pmd++;
+
+	mem_unprotect.saved_pmd = *mem_unprotect.pmd;
+	/*
+	 * Use pmd_val()/__pmd() rather than raw integer arithmetic so this
+	 * also builds when STRICT_MM_TYPECHECKS makes pmd_t a struct.
+	 */
+	if ((pmd_val(mem_unprotect.saved_pmd) & PMD_TYPE_MASK) != PMD_TYPE_SECT)
+		return;
+
+	/* Clearing APX grants the kernel write permission on the section. */
+	*mem_unprotect.pmd = __pmd(pmd_val(*mem_unprotect.pmd) & ~PMD_SECT_APX);
+
+	flush_pmd_entry(mem_unprotect.pmd_to_flush);
+	flush_tlb_kernel_page(mem_unprotect.addr);
+	mem_unprotect.made_writeable = true;
+}
+
+/*
+ * Undo mem_text_address_writeable(): put back the saved first-level
+ * entry and flush it, re-establishing read-only protection.
+ *
+ * This function must be called with mem_text_writeable_lock held,
+ * with no unlock since the matching mem_text_address_writeable() call.
+ */
+void mem_text_address_restore(void)
+{
+ /* No-op unless protection was actually dropped (e.g. addr out of range). */
+ if (mem_unprotect.made_writeable) {
+ *mem_unprotect.pmd = mem_unprotect.saved_pmd;
+ flush_pmd_entry(mem_unprotect.pmd_to_flush);
+ flush_tlb_kernel_page(mem_unprotect.addr);
+ }
+}
+#endif
+
static void __init map_lowmem(void)
{
struct memblock_region *reg;
@@ -1065,9 +1153,9 @@
create_mapping(&map);
- map.pfn = __phys_to_pfn(__pa(_text));
- map.virtual = (unsigned long)_text;
- map.length = __start_rodata - _text;
+ map.pfn = __phys_to_pfn(__pa(RX_AREA_START));
+ map.virtual = (unsigned long)RX_AREA_START;
+ map.length = RX_AREA_END - RX_AREA_START;
map.type = MT_MEMORY_RX;
create_mapping(&map);