| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2002 Jeff Dike (jdike@karaya.com) | 
|  | 3 | * Copyright 2003 PathScale, Inc. | 
|  | 4 | * Licensed under the GPL | 
|  | 5 | */ | 
|  | 6 |  | 
|  | 7 | #include "linux/stddef.h" | 
|  | 8 | #include "linux/kernel.h" | 
|  | 9 | #include "linux/sched.h" | 
|  | 10 | #include "linux/mm.h" | 
|  | 11 | #include "asm/page.h" | 
|  | 12 | #include "asm/pgtable.h" | 
|  | 13 | #include "asm/uaccess.h" | 
|  | 14 | #include "asm/tlbflush.h" | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | #include "mem_user.h" | 
|  | 16 | #include "os.h" | 
|  | 17 | #include "tlb.h" | 
|  | 18 |  | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 19 | static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last, | 
|  | 20 | int finished, void **flush) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | { | 
|  | 22 | struct host_vm_op *op; | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 23 | int i, ret=0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 24 |  | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 25 | for(i = 0; i <= last && !ret; i++){ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | op = &ops[i]; | 
|  | 27 | switch(op->type){ | 
|  | 28 | case MMAP: | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 29 | ret = os_map_memory((void *) op->u.mmap.addr, | 
|  | 30 | op->u.mmap.fd, op->u.mmap.offset, | 
|  | 31 | op->u.mmap.len, op->u.mmap.r, | 
|  | 32 | op->u.mmap.w, op->u.mmap.x); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 33 | break; | 
|  | 34 | case MUNMAP: | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 35 | ret = os_unmap_memory((void *) op->u.munmap.addr, | 
|  | 36 | op->u.munmap.len); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 37 | break; | 
|  | 38 | case MPROTECT: | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 39 | ret = protect_memory(op->u.mprotect.addr, | 
|  | 40 | op->u.munmap.len, | 
|  | 41 | op->u.mprotect.r, | 
|  | 42 | op->u.mprotect.w, | 
|  | 43 | op->u.mprotect.x, 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 44 | protect_memory(op->u.mprotect.addr, op->u.munmap.len, | 
|  | 45 | op->u.mprotect.r, op->u.mprotect.w, | 
|  | 46 | op->u.mprotect.x, 1); | 
|  | 47 | break; | 
|  | 48 | default: | 
|  | 49 | printk("Unknown op type %d in do_ops\n", op->type); | 
|  | 50 | break; | 
|  | 51 | } | 
|  | 52 | } | 
| Jeff Dike | c560049 | 2005-09-03 15:57:36 -0700 | [diff] [blame] | 53 |  | 
| Bodo Stroesser | 07bf731 | 2005-09-03 15:57:50 -0700 | [diff] [blame] | 54 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 55 | } | 
|  | 56 |  | 
|  | 57 | static void fix_range(struct mm_struct *mm, unsigned long start_addr, | 
|  | 58 | unsigned long end_addr, int force) | 
|  | 59 | { | 
|  | 60 | if((current->thread.mode.tt.extern_pid != -1) && | 
|  | 61 | (current->thread.mode.tt.extern_pid != os_getpid())) | 
|  | 62 | panic("fix_range fixing wrong address space, current = 0x%p", | 
|  | 63 | current); | 
|  | 64 |  | 
| Jeff Dike | d67b569 | 2005-07-07 17:56:49 -0700 | [diff] [blame] | 65 | fix_range_common(mm, start_addr, end_addr, force, do_ops); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 66 | } | 
|  | 67 |  | 
/* Bumped whenever the kernel part of the address space changes;
 * flush_tlb_mm_tt compares it against the per-thread vm_seq to decide
 * whether a full kernel-vm flush is needed.
 */
atomic_t vmchange_seq = ATOMIC_INIT(1);

/* Flush a range of kernel addresses; if anything actually changed,
 * bump vmchange_seq so other threads re-sync their kernel mappings.
 */
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
	if(flush_tlb_kernel_range_common(start, end))
		atomic_inc(&vmchange_seq);
}
|  | 75 |  | 
/* Flush the entire kernel vm area (start_vm .. end_vm). */
void flush_tlb_kernel_vm_tt(void)
{
	flush_tlb_kernel_range(start_vm, end_vm);
}
|  | 80 |  | 
/* Flush the single page containing addr. */
void __flush_tlb_one_tt(unsigned long addr)
{
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
|  | 85 |  | 
|  | 86 | void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start, | 
|  | 87 | unsigned long end) | 
|  | 88 | { | 
|  | 89 | if(vma->vm_mm != current->mm) return; | 
|  | 90 |  | 
|  | 91 | /* Assumes that the range start ... end is entirely within | 
|  | 92 | * either process memory or kernel vm | 
|  | 93 | */ | 
|  | 94 | if((start >= start_vm) && (start < end_vm)){ | 
|  | 95 | if(flush_tlb_kernel_range_common(start, end)) | 
|  | 96 | atomic_inc(&vmchange_seq); | 
|  | 97 | } | 
|  | 98 | else fix_range(vma->vm_mm, start, end, 0); | 
|  | 99 | } | 
|  | 100 |  | 
|  | 101 | void flush_tlb_mm_tt(struct mm_struct *mm) | 
|  | 102 | { | 
|  | 103 | unsigned long seq; | 
|  | 104 |  | 
|  | 105 | if(mm != current->mm) return; | 
|  | 106 |  | 
|  | 107 | fix_range(mm, 0, STACK_TOP, 0); | 
|  | 108 |  | 
|  | 109 | seq = atomic_read(&vmchange_seq); | 
|  | 110 | if(current->thread.mode.tt.vm_seq == seq) | 
|  | 111 | return; | 
|  | 112 | current->thread.mode.tt.vm_seq = seq; | 
|  | 113 | flush_tlb_kernel_range_common(start_vm, end_vm); | 
|  | 114 | } | 
|  | 115 |  | 
/* Unconditionally re-sync the whole address space: force-fix all
 * process mappings up to STACK_TOP, then flush the kernel vm area.
 */
void force_flush_all_tt(void)
{
	fix_range(current->mm, 0, STACK_TOP, 1);
	flush_tlb_kernel_range_common(start_vm, end_vm);
}