/*
 * arch/sh/kernel/vsyscall.c
 *
 * Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright (C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);

static int __init vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("vdso=", vdso_setup);
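
/*
 * Example: booting with "vdso=0" on the kernel command line disables
 * the vDSO mapping for every subsequent exec(), while the default of
 * "vdso=1" keeps it enabled.
 */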

/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static void *syscall_page;

int __init vsyscall_init(void)
{
	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	if (unlikely(!syscall_page))
		return -ENOMEM;

	/*
	 * XXX: Map this page to a fixmap entry if we get around
	 * to adding the page to ELF core dumps
	 */

	/* Copy the embedded ELF DSO image into our page */
	memcpy(syscall_page,
	       &vsyscall_trapa_start,
	       &vsyscall_trapa_end - &vsyscall_trapa_start);

	return 0;
}

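/*
 * Fault handler for the vDSO VMA: every fault is satisfied from the
 * single page allocated in vsyscall_init(). Note that the offset
 * arithmetic only works because the mapping is exactly one page long.
 */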
static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
				       unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *page;

	/* vm_end is exclusive, so an address equal to it is out of range */
	if (address < vma->vm_start || address >= vma->vm_end)
		return NOPAGE_SIGBUS;

	page = virt_to_page(syscall_page + offset);

	get_page(page);

	return page;
}

/*
 * An empty ->close() is enough to prevent VMA merging:
 * is_mergeable_vma() refuses to merge any VMA whose vm_ops
 * provides a close callback.
 */
static void syscall_vma_close(struct vm_area_struct *vma)
{
}

static struct vm_operations_struct syscall_vm_ops = {
	.nopage	= syscall_vma_nopage,
	.close	= syscall_vma_close,
};

/* Set up a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		ret = -ENOMEM;
		goto up_fail;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + PAGE_SIZE;
	/* MAYWRITE to allow gdb to COW and set breakpoints */
	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
	vma->vm_ops = &syscall_vm_ops;
	vma->vm_mm = mm;

	ret = insert_vm_struct(mm, vma);
	if (unlikely(ret)) {
		kmem_cache_free(vm_area_cachep, vma);
		goto up_fail;
	}

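	/*
	 * Record where the vDSO landed; the ELF loader hands this
	 * address to userspace through the AT_SYSINFO_EHDR aux vector
	 * entry (via the arch's ARCH_DLINFO definition), which is how
	 * glibc is expected to find the page on exec().
	 */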
	mm->context.vdso = (void *)addr;

	mm->total_vm++;
up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

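/*
 * Label the vDSO mapping so it shows up as "[vdso]" in
 * /proc/<pid>/maps.
 */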
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm &&
	    vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
		return "[vdso]";

	return NULL;
}

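/*
 * Unlike i386, which also exports a fixed-address vsyscall page as a
 * "gate" VMA, sh maps the vDSO as an ordinary VMA above, so there is
 * no gate area to report.
 */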
struct vm_area_struct *get_gate_vma(struct task_struct *task)
{
	return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long address)
{
	return 0;
}

int in_gate_area_no_task(unsigned long address)
{
	return 0;
}