/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

/* Max supported size for symbol names */
#define MAX_SYMNAME	64

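/*
 * The 32-bit vDSO image is linked into the kernel between vdso32_start
 * and vdso32_end; vdso32_kbase points at its first byte.
 */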
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
unsigned int vdso32_pages;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;

#ifdef CONFIG_PPC64
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
unsigned int vdso64_pages;
unsigned long vdso64_rt_sigtramp;
#endif /* CONFIG_PPC64 */

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __attribute__((__section__(".data.page_aligned")));
struct vdso_data *vdso_data = &vdso_data_store.data;

/* Format of the patch table */
struct vdso_patch_def
{
	unsigned long	ftr_mask, ftr_value;
	const char	*gen_name;
	const char	*fix_name;
};

/* Table of functions to patch based on the CPU type/revision.
 *
 * Currently, we replace __kernel_sync_dicache with a lighter variant on
 * processors with a coherent icache, and we disable __kernel_gettimeofday
 * on processors that cannot use the timebase.
 */
static struct vdso_patch_def vdso_patches[] = {
	{
		CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
	{
		CPU_FTR_USE_TB, 0,
		"__kernel_gettimeofday", NULL
	},
};

/*
 * Some information carried around for each of the vDSO images during
 * parsing at boot time.
 */
struct lib32_elfinfo
{
	Elf32_Ehdr	*hdr;		/* ptr to ELF */
	Elf32_Sym	*dynsym;	/* ptr to .dynsym section */
	unsigned long	dynsymsize;	/* size of .dynsym section */
	char		*dynstr;	/* ptr to .dynstr section */
	unsigned long	text;		/* offset of .text section in .so */
};

struct lib64_elfinfo
{
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	char		*dynstr;
	unsigned long	text;
};


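/*
 * Debug helpers: dump the kernel page and, when a vma is given, the user
 * page backing each page of a vDSO mapping.
 */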
#ifdef __DEBUG
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
	printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
	       page_count(pg),
	       pg->flags);
	if (upg/* && pg != upg*/) {
		printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg)
						       << PAGE_SHIFT),
		       page_count(upg),
		       upg->flags);
	}
	printk("\n");
}

static void dump_vdso_pages(struct vm_area_struct * vma)
{
	int i;

	if (!vma || test_thread_flag(TIF_32BIT)) {
		printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
		for (i = 0; i < vdso32_pages; i++) {
			struct page *pg = virt_to_page(vdso32_kbase +
						       i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
	if (!vma || !test_thread_flag(TIF_32BIT)) {
		printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
		for (i = 0; i < vdso64_pages; i++) {
			struct page *pg = virt_to_page(vdso64_kbase +
						       i*PAGE_SIZE);
			struct page *upg = (vma && vma->vm_mm) ?
				follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
				: NULL;
			dump_one_vdso_page(pg, upg);
		}
	}
}
#endif /* __DEBUG */

/*
 * Keep a dummy vma_close for now; it prevents VMA merging.
 */
static void vdso_vma_close(struct vm_area_struct * vma)
{
}

/*
 * Our nopage() function maps in the actual vDSO kernel pages. They are
 * mapped read-only by do_no_page() and eventually COW'ed, either right
 * away on an initial write access, or later by do_wp_page().
 */
static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
				     unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	struct page *pg;
#ifdef CONFIG_PPC64
	void *vbase = (vma->vm_mm->task_size > TASK_SIZE_USER32) ?
		vdso64_kbase : vdso32_kbase;
#else
	void *vbase = vdso32_kbase;
#endif

	DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
	    current->comm, address, offset);

	if (address < vma->vm_start || address > vma->vm_end)
		return NOPAGE_SIGBUS;

	/*
	 * Last page is systemcfg.
	 */
	if ((vma->vm_end - address) <= PAGE_SIZE)
		pg = virt_to_page(vdso_data);
	else
		pg = virt_to_page(vbase + offset);

	get_page(pg);
	DBG(" ->page count: %d\n", page_count(pg));

	return pg;
}

static struct vm_operations_struct vdso_vmops = {
	.close	= vdso_vma_close,
	.nopage	= vdso_vma_nopage,
};

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;

#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		vdso_pages = vdso32_pages;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_pages = vdso64_pages;
		vdso_base = VDSO64_MBASE;
	}
#else
	vdso_pages = vdso32_pages;
	vdso_base = VDSO32_MBASE;
#endif

	current->thread.vdso_base = 0;

	/* The vDSO had a problem and was disabled; just don't "enable"
	 * it for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL)
		return -ENOMEM;

	memset(vma, 0, sizeof(*vma));

	/* Add a page to the vdso size for the data page */
	vdso_pages++;

	/*
	 * pick a base address for the vDSO in process space. We try to put it
	 * at vdso_base which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 */
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      vdso_pages << PAGE_SHIFT, 0, 0);
	if (vdso_base & ~PAGE_MASK) {
		kmem_cache_free(vm_area_cachep, vma);
		return (int)vdso_base;
	}

	current->thread.vdso_base = vdso_base;

	vma->vm_mm = mm;
	vma->vm_start = current->thread.vdso_base;
	vma->vm_end = vma->vm_start + (vdso_pages << PAGE_SHIFT);

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.  gdb can break that via
	 * the ptrace interface and thus trigger COW on them, but it is
	 * then your responsibility never to do that on the "data" page of
	 * the vDSO, or you'll stop getting kernel updates and your nice
	 * userland gettimeofday will be totally dead.  It's fine to use
	 * that for setting breakpoints in the vDSO code pages though.
	 */
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	vma->vm_ops = &vdso_vmops;

	down_write(&mm->mmap_sem);
	if (insert_vm_struct(mm, vma)) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}
	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	up_write(&mm->mmap_sem);

	return 0;
}

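/*
 * Look up an ELF section by name in the 32-bit vDSO image and, if size
 * is non-NULL, return the section size there.
 */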
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf32_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

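/*
 * Look up a symbol by name in the .dynsym table of the 32-bit vDSO,
 * ignoring any "@..." version suffix.
 */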
static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib,
					const char *symname)
{
	unsigned int i;
	char name[MAX_SYMNAME], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib,
					    const char *symname)
{
	Elf32_Sym *sym = find_symbol32(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO32: function %s not found !\n",
		       symname);
		return 0;
	}
	return sym->st_value - VDSO32_LBASE;
}

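/*
 * Redirect the generic 32-bit vDSO symbol 'orig' to its fixed variant
 * 'fix', or hide it from the dynamic symbol table when 'fix' is NULL.
 */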
static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
				struct lib64_elfinfo *v64,
				const char *orig, const char *fix)
{
	Elf32_Sym *sym32_gen, *sym32_fix;

	sym32_gen = find_symbol32(v32, orig);
	if (sym32_gen == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
		return -1;
	}
	if (fix == NULL) {
		sym32_gen->st_name = 0;
		return 0;
	}
	sym32_fix = find_symbol32(v32, fix);
	if (sym32_fix == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym32_gen->st_value = sym32_fix->st_value;
	sym32_gen->st_size = sym32_fix->st_size;
	sym32_gen->st_info = sym32_fix->st_info;
	sym32_gen->st_other = sym32_fix->st_other;
	sym32_gen->st_shndx = sym32_fix->st_shndx;

	return 0;
}


#ifdef CONFIG_PPC64

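/*
 * 64-bit counterpart of find_section32(): look up an ELF section by name
 * in the 64-bit vDSO image and optionally return its size.
 */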
static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf64_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

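/*
 * 64-bit counterpart of find_symbol32(): look up a symbol by name in the
 * .dynsym table of the 64-bit vDSO, ignoring any "@..." version suffix.
 */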
static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib,
					const char *symname)
{
	unsigned int i;
	char name[MAX_SYMNAME], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function64(struct lib64_elfinfo *lib,
					    const char *symname)
{
	Elf64_Sym *sym = find_symbol64(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO64: function %s not found !\n",
		       symname);
		return 0;
	}
#ifdef VDS64_HAS_DESCRIPTORS
	return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) -
		VDSO64_LBASE;
#else
	return sym->st_value - VDSO64_LBASE;
#endif
}

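/*
 * 64-bit counterpart of vdso_do_func_patch32(): redirect 'orig' to 'fix',
 * or hide it when 'fix' is NULL.
 */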
static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
				struct lib64_elfinfo *v64,
				const char *orig, const char *fix)
{
	Elf64_Sym *sym64_gen, *sym64_fix;

	sym64_gen = find_symbol64(v64, orig);
	if (sym64_gen == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
		return -1;
	}
	if (fix == NULL) {
		sym64_gen->st_name = 0;
		return 0;
	}
	sym64_fix = find_symbol64(v64, fix);
	if (sym64_fix == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym64_gen->st_value = sym64_fix->st_value;
	sym64_gen->st_size = sym64_fix->st_size;
	sym64_gen->st_info = sym64_fix->st_info;
	sym64_gen->st_other = sym64_fix->st_other;
	sym64_gen->st_shndx = sym64_fix->st_shndx;

	return 0;
}

#endif /* CONFIG_PPC64 */


static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
					struct lib64_elfinfo *v64)
{
	void *sect;

	/*
	 * Locate symbol tables & text section
	 */

	v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
	v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
	if (v32->dynsym == NULL || v32->dynstr == NULL) {
		printk(KERN_ERR "vDSO32: required symbol section not found\n");
		return -1;
	}
	sect = find_section32(v32->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO32: the .text section was not found\n");
		return -1;
	}
	v32->text = sect - vdso32_kbase;

#ifdef CONFIG_PPC64
	v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
	v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
	if (v64->dynsym == NULL || v64->dynstr == NULL) {
		printk(KERN_ERR "vDSO64: required symbol section not found\n");
		return -1;
	}
	sect = find_section64(v64->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO64: the .text section was not found\n");
		return -1;
	}
	v64->text = sect - vdso64_kbase;
#endif /* CONFIG_PPC64 */

	return 0;
}

static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
					  struct lib64_elfinfo *v64)
{
	/*
	 * Find signal trampolines
	 */

#ifdef CONFIG_PPC64
	vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
#endif
	vdso32_sigtramp	   = find_function32(v32, "__kernel_sigtramp32");
	vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}

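/*
 * Store, at the __kernel_datapage_offset location in each vDSO image,
 * the distance from that location to the vDSO data page, so the vDSO
 * code can find the data page relative to its own address.
 */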
static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
				      struct lib64_elfinfo *v64)
{
	Elf32_Sym *sym32;
#ifdef CONFIG_PPC64
	Elf64_Sym *sym64;

	sym64 = find_symbol64(v64, "__kernel_datapage_offset");
	if (sym64 == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol "
		       "__kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
		(vdso64_pages << PAGE_SHIFT) -
		(sym64->st_value - VDSO64_LBASE);
#endif /* CONFIG_PPC64 */

	sym32 = find_symbol32(v32, "__kernel_datapage_offset");
	if (sym32 == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol "
		       "__kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
		(vdso32_pages << PAGE_SHIFT) -
		(sym32->st_value - VDSO32_LBASE);

	return 0;
}

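/*
 * Apply the CPU-feature based patches from vdso_patches[] to both vDSO
 * images.
 */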
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
		struct vdso_patch_def *patch = &vdso_patches[i];
		int match = (cur_cpu_spec->cpu_features & patch->ftr_mask)
			== patch->ftr_value;
		if (!match)
			continue;

		DBG("replacing %s with %s...\n", patch->gen_name,
		    patch->fix_name ? patch->fix_name : "NONE");

		/*
		 * Patch the 32-bit and 64-bit symbols. Note that we do not
		 * patch the "." symbol on 64-bit.
		 * It would be easy to do, but doesn't seem to be necessary,
		 * patching the OPD symbol is enough.
		 */
		vdso_do_func_patch32(v32, v64, patch->gen_name,
				     patch->fix_name);
#ifdef CONFIG_PPC64
		vdso_do_func_patch64(v32, v64, patch->gen_name,
				     patch->fix_name);
#endif /* CONFIG_PPC64 */
	}

	return 0;
}


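/*
 * One-time initialization of the vDSO images: locate the sections and
 * symbols we need, patch the data page offset and alternate functions,
 * and record the signal trampoline addresses.
 */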
static __init int vdso_setup(void)
{
	struct lib32_elfinfo	v32;
	struct lib64_elfinfo	v64;

	v32.hdr = vdso32_kbase;
#ifdef CONFIG_PPC64
	v64.hdr = vdso64_kbase;
#endif
	if (vdso_do_find_sections(&v32, &v64))
		return -1;

	if (vdso_fixup_datapage(&v32, &v64))
		return -1;

	if (vdso_fixup_alt_funcs(&v32, &v64))
		return -1;

	vdso_setup_trampolines(&v32, &v64);

	return 0;
}

/*
 * Called from vdso_init() to initialize the bitmap of available
 * syscalls in the systemcfg page
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;
	extern unsigned long *sys_call_table;
	extern unsigned long sys_ni_syscall;

	for (i = 0; i < __NR_syscalls; i++) {
#ifdef CONFIG_PPC64
		if (sys_call_table[i*2] != sys_ni_syscall)
			vdso_data->syscall_map_64[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		if (sys_call_table[i*2+1] != sys_ni_syscall)
			vdso_data->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
#else /* CONFIG_PPC64 */
		if (sys_call_table[i] != sys_ni_syscall)
			vdso_data->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
#endif /* CONFIG_PPC64 */
	}
}

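/*
 * Called once at boot: fill in the vDSO data page, compute the image
 * sizes and run the boot-time fixups.
 */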
void __init vdso_init(void)
{
	int i;

#ifdef CONFIG_PPC64
	/*
	 * Fill up the "systemcfg" stuff for backward compatibility
	 */
	strcpy(vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);
	/*
	 * Fake the old platform number for pSeries and iSeries and add
	 * in LPAR bit if necessary
	 */
	vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = lmb_phys_mem_size();
	vdso_data->dcache_size = ppc64_caches.dsize;
	vdso_data->dcache_line_size = ppc64_caches.dline_size;
	vdso_data->icache_size = ppc64_caches.isize;
	vdso_data->icache_line_size = ppc64_caches.iline_size;

	/*
	 * Calculate the size of the 64-bit vDSO
	 */
	vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
	DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
#endif /* CONFIG_PPC64 */

	/*
	 * Calculate the size of the 32-bit vDSO
	 */
	vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
	DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);

	/*
	 * Set up the syscall map in the vDSO
	 */
	vdso_setup_syscall_map();

	/*
	 * Initialize the vDSO images in memory, that is, do the necessary
	 * fixups of vDSO symbols, locate trampolines, etc...
	 */
	if (vdso_setup()) {
		printk(KERN_ERR "vDSO setup failure, not enabled !\n");
		vdso32_pages = 0;
#ifdef CONFIG_PPC64
		vdso64_pages = 0;
#endif
		return;
	}

	/* Make sure pages are in the correct state */
	for (i = 0; i < vdso32_pages; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
	}
#ifdef CONFIG_PPC64
	for (i = 0; i < vdso64_pages; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
	}
#endif /* CONFIG_PPC64 */

	get_page(virt_to_page(vdso_data));
}

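/*
 * powerpc has no "gate area" (the vDSO is an ordinary VMA), so these
 * are simple stubs.
 */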
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
	return NULL;
}