/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 *
 * Copyright (C) 2000-2002 Axis Communications AB
 *
 * Authors:   Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

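/* Debug printout macro: it expands to nothing, so the D(printk(...)) calls
 * below are compiled out.  Define D(x) as x to enable them.
 */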
#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
        int i;
        unsigned long flags;

        /* The vpn of i & 0xf is so we don't write similar TLB entries
         * in the same 4-way entry group.
         */

        local_save_flags(flags);
        local_irq_disable();
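        /* Select each entry in turn and overwrite it with the invalid
         * page_id and a cleared R_TLB_LO, so it can never give a hit.
         */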
        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                              IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                              IO_STATE(R_TLB_LO, valid,  no ) |
                              IO_STATE(R_TLB_LO, kernel, no ) |
                              IO_STATE(R_TLB_LO, we,     no ) |
                              IO_FIELD(R_TLB_LO, pfn,    0  ) );
        }
        local_irq_restore(flags);
        D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
        int i;
        int page_id = mm->context.page_id;
        unsigned long flags;

        D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

        if(page_id == NO_CONTEXT)
                return;

        /* mark the TLB entries that match the page_id as invalid.
         * here we could also check the _PAGE_GLOBAL bit and NOT flush
         * global pages. is it worth the extra I/O ?
         */

        local_save_flags(flags);
        local_irq_disable();
        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}

/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma,
               unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int page_id = mm->context.page_id;
        int i;
        unsigned long flags;

        D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

        if(page_id == NO_CONTEXT)
                return;

        addr &= PAGE_MASK; /* perhaps not necessary */

        /* invalidate those TLB entries that match both the mm context
         * and the virtual address requested
         */

        local_save_flags(flags);
        local_irq_disable();
        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
                unsigned long tlb_hi;
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                tlb_hi = *R_TLB_HI;
                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
                    (tlb_hi & PAGE_MASK) == addr) {
                        *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                    addr; /* same addr as before works. */

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}

/* invalidate a page range */

void
flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start,
                unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int page_id = mm->context.page_id;
        int i;
        unsigned long flags;

        D(printk("tlb: flush range 0x%lx<->0x%lx in context %d (%p)\n",
                 start, end, page_id, mm));

        if(page_id == NO_CONTEXT)
                return;

        start &= PAGE_MASK;  /* probably not necessary */
        end &= PAGE_MASK;    /* ditto */

        /* invalidate those TLB entries that match both the mm context
         * and the virtual address range
         */

        local_save_flags(flags);
        local_irq_disable();
        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
                unsigned long tlb_hi, vpn;
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                tlb_hi = *R_TLB_HI;
                vpn = tlb_hi & PAGE_MASK;
                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
                    vpn >= start && vpn < end) {
                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
        int i;
        unsigned long flags;

        printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

        local_save_flags(flags);
        local_irq_disable();
        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
                printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
                       i, *R_TLB_HI, *R_TLB_LO);
        }
        local_irq_restore(flags);
}
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
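        /* No page_id yet; a real one is picked up via get_mmu_context()
         * when this mm is first switched to (see switch_mm() below).
         */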
        mm->context.page_id = NO_CONTEXT;
        return 0;
}

/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        /* make sure we have a context */

        get_mmu_context(next);

        /* remember the pgd for the fault handlers
         * this is similar to the pgd register in some other CPUs.
         * we need our own copy of it because current and active_mm
         * might be invalid at points where we still need to dereference
         * the pgd.
         */

        current_pgd = next->pgd;

        /* switch context in the MMU */

        D(printk("switching mmu_context to %d (%p)\n", next->context.page_id, next));

        *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
}