blob: 16fc6a28882d8b11dede6ecdec95fa73f7bee736 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#include "linux/stddef.h"
8#include "linux/kernel.h"
9#include "linux/sched.h"
10#include "linux/mm.h"
11#include "asm/page.h"
12#include "asm/pgtable.h"
13#include "asm/uaccess.h"
14#include "asm/tlbflush.h"
15#include "user_util.h"
16#include "mem_user.h"
17#include "os.h"
18#include "tlb.h"
19
Jeff Dikec5600492005-09-03 15:57:36 -070020static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
21 int finished, void *flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -070022{
23 struct host_vm_op *op;
24 int i;
25
26 for(i = 0; i <= last; i++){
27 op = &ops[i];
28 switch(op->type){
29 case MMAP:
30 os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
31 op->u.mmap.offset, op->u.mmap.len,
32 op->u.mmap.r, op->u.mmap.w,
33 op->u.mmap.x);
34 break;
35 case MUNMAP:
36 os_unmap_memory((void *) op->u.munmap.addr,
37 op->u.munmap.len);
38 break;
39 case MPROTECT:
40 protect_memory(op->u.mprotect.addr, op->u.munmap.len,
41 op->u.mprotect.r, op->u.mprotect.w,
42 op->u.mprotect.x, 1);
43 break;
44 default:
45 printk("Unknown op type %d in do_ops\n", op->type);
46 break;
47 }
48 }
Jeff Dikec5600492005-09-03 15:57:36 -070049
50 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070051}
52
53static void fix_range(struct mm_struct *mm, unsigned long start_addr,
54 unsigned long end_addr, int force)
55{
56 if((current->thread.mode.tt.extern_pid != -1) &&
57 (current->thread.mode.tt.extern_pid != os_getpid()))
58 panic("fix_range fixing wrong address space, current = 0x%p",
59 current);
60
Jeff Diked67b5692005-07-07 17:56:49 -070061 fix_range_common(mm, start_addr, end_addr, force, do_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062}
63
/* Bumped whenever a kernel-vm flush actually changed mappings; each
 * thread keeps its own thread.mode.tt.vm_seq copy and reflushes the
 * kernel vm area when the two disagree (see flush_tlb_mm_tt). */
atomic_t vmchange_seq = ATOMIC_INIT(1);
65
66void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
67{
68 if(flush_tlb_kernel_range_common(start, end))
69 atomic_inc(&vmchange_seq);
70}
71
72static void protect_vm_page(unsigned long addr, int w, int must_succeed)
73{
74 int err;
75
76 err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
77 if(err == 0) return;
78 else if((err == -EFAULT) || (err == -ENOMEM)){
79 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
80 protect_vm_page(addr, w, 1);
81 }
82 else panic("protect_vm_page : protect failed, errno = %d\n", err);
83}
84
85void mprotect_kernel_vm(int w)
86{
87 struct mm_struct *mm;
88 pgd_t *pgd;
89 pud_t *pud;
90 pmd_t *pmd;
91 pte_t *pte;
92 unsigned long addr;
93
94 mm = &init_mm;
95 for(addr = start_vm; addr < end_vm;){
96 pgd = pgd_offset(mm, addr);
97 pud = pud_offset(pgd, addr);
98 pmd = pmd_offset(pud, addr);
99 if(pmd_present(*pmd)){
100 pte = pte_offset_kernel(pmd, addr);
101 if(pte_present(*pte)) protect_vm_page(addr, w, 0);
102 addr += PAGE_SIZE;
103 }
104 else addr += PMD_SIZE;
105 }
106}
107
/* Flush the TLB for the whole kernel vm area (start_vm .. end_vm). */
void flush_tlb_kernel_vm_tt(void)
{
	flush_tlb_kernel_range(start_vm, end_vm);
}
112
/* Flush the TLB entry for the single page containing addr. */
void __flush_tlb_one_tt(unsigned long addr)
{
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
117
118void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
119 unsigned long end)
120{
121 if(vma->vm_mm != current->mm) return;
122
123 /* Assumes that the range start ... end is entirely within
124 * either process memory or kernel vm
125 */
126 if((start >= start_vm) && (start < end_vm)){
127 if(flush_tlb_kernel_range_common(start, end))
128 atomic_inc(&vmchange_seq);
129 }
130 else fix_range(vma->vm_mm, start, end, 0);
131}
132
133void flush_tlb_mm_tt(struct mm_struct *mm)
134{
135 unsigned long seq;
136
137 if(mm != current->mm) return;
138
139 fix_range(mm, 0, STACK_TOP, 0);
140
141 seq = atomic_read(&vmchange_seq);
142 if(current->thread.mode.tt.vm_seq == seq)
143 return;
144 current->thread.mode.tt.vm_seq = seq;
145 flush_tlb_kernel_range_common(start_vm, end_vm);
146}
147
/* Unconditionally rebuild the current address space: force-fix every
 * process mapping and reflush the whole kernel vm area. */
void force_flush_all_tt(void)
{
	fix_range(current->mm, 0, STACK_TOP, 1);
	flush_tlb_kernel_range_common(start_vm, end_vm);
}