/*
 * AVR32 TLB operations
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>

#include <asm/mmu_context.h>

#define _TLBEHI_I	0x100

void show_dtlb_entry(unsigned int index)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
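	/*
	 * Select the entry to read: keep the low MMUCR control bits
	 * (the 0x13 mask) and put the wanted index into the entry
	 * pointer field at bit 14, the same field that
	 * set_replacement_pointer() manipulates.
	 */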
	mmucr = mmucr_save & 0x13;
	mmucr |= index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

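	/*
	 * The fields below line up with the header printed by
	 * dump_dtlb(): ID V G ASID VPN PFN AP SZ C B W D.
	 */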
	printk("%2u: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
	       index,
	       (tlbehi & 0x200)?'1':'0',
	       (tlbelo & 0x100)?'1':'0',
	       (tlbehi & 0xff),
	       (tlbehi >> 12), (tlbelo >> 12),
	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
	       (tlbelo & 0x200)?'1':'0',
	       (tlbelo & 0x080)?'1':'0',
	       (tlbelo & 0x001)?'1':'0',
	       (tlbelo & 0x002)?'1':'0');

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);
}

void dump_dtlb(void)
{
	unsigned int i;

	printk("ID V G ASID VPN PFN AP SZ C B W D\n");
	for (i = 0; i < 32; i++)
		show_dtlb_entry(i);
}

static unsigned long last_mmucr;

static inline void set_replacement_pointer(unsigned shift)
{
	unsigned long mmucr;

	/* Does this mapping already exist? */
	__asm__ __volatile__(
		"	tlbs\n"
		"	mfsr	%0, %1"
		: "=r"(mmucr)
		: "i"(SYSREG_MMUCR));

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned long rp;
		unsigned long tlbar = sysreg_read(TLBARLO);

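		/*
		 * TLBARLO holds one "accessed" bit per entry, with bit 31
		 * for entry 0 (see __flush_tlb_page(), which sets
		 * 0x80000000 >> entry).  Pick the lowest-numbered entry
		 * whose bit is still set; if every bit is clear, start
		 * over at entry 0 and mark all entries as not accessed.
		 */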
		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr &= 0x13;
		mmucr |= (rp << shift);

		sysreg_write(MMUCR, mmucr);
	}

	last_mmucr = mmucr;
}

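/*
 * Load a DTLB entry for the given address and ASID: set up TLBEHI,
 * point MMUCR at either the existing entry for this mapping or a
 * not-recently-accessed slot (see set_replacement_pointer()), write
 * the hardware page flags to TLBELO and commit the entry with tlbw.
 */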
static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
{
	unsigned long vpn;

	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
	sysreg_write(TLBEHI, vpn);
	cpu_sync_pipeline();

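	/* The DTLB entry pointer field of MMUCR starts at bit 14 */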
	set_replacement_pointer(14);

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	asm volatile("nop\n\ttlbw" : : : "memory");
	cpu_sync_pipeline();
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;

	/* ptrace may call this routine */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	update_dtlb(address, pte, get_asid());
	local_irq_restore(flags);
}

void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	unsigned long mmucr, tlbehi;

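	/*
	 * Probe for the page with tlbs.  On a hit, write the entry
	 * back with the valid bit cleared and flag its slot in TLBARLO
	 * as not accessed so it is preferred for replacement.
	 */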
	page |= asid;
	sysreg_write(TLBEHI, page);
	cpu_sync_pipeline();
	asm volatile("tlbs");
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned long tlbarlo;
		unsigned long entry;

		/* Clear the "valid" bit */
		tlbehi = sysreg_read(TLBEHI);
		tlbehi &= ~_TLBEHI_VALID;
		sysreg_write(TLBEHI, tlbehi);
		cpu_sync_pipeline();

		/* mark the entry as "not accessed" */
		entry = (mmucr >> 14) & 0x3f;
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000 >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* update the entry with valid bit clear */
		asm volatile("tlbw");
		cpu_sync_pipeline();
	}
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags, asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
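		/*
		 * When flushing on behalf of another mm, temporarily
		 * borrow its ASID so the TLB search matches its entries.
		 */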
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}

		__flush_tlb_page(asid, page);

		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
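		/*
		 * Flushing page by page only pays off for small ranges;
		 * above a quarter of the DTLB we simply drop the whole
		 * context so it picks up a fresh ASID.
		 */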
		if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}

			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

/*
 * TODO: If this is only called for addresses > TASK_SIZE, we can probably
 * skip the ASID stuff and just use the Global bit...
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
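		/* Do the search under init_mm's ASID; the previous one is restored below */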
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB entries of this process by getting a new ASID */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

void flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
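	/* Setting the I bit in MMUCR invalidates the entire TLB */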
	sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
	local_irq_restore(flags);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>

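/*
 * Expose the contents of the DTLB through /proc/tlb using a seq_file
 * iterator; the output format matches dump_dtlb() above.
 */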
static void *tlb_start(struct seq_file *tlb, loff_t *pos)
{
	static unsigned long tlb_index;

	if (*pos >= 32)
		return NULL;

	tlb_index = 0;
	return &tlb_index;
}

static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
{
	unsigned long *index = v;

	if (*index >= 31)
		return NULL;

	++*pos;
	++*index;
	return index;
}

static void tlb_stop(struct seq_file *tlb, void *v)
{

}

static int tlb_show(struct seq_file *tlb, void *v)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;
	unsigned long *index = v;

	if (*index == 0)
		seq_puts(tlb, "ID V G ASID VPN PFN AP SZ C B W D\n");

	BUG_ON(*index >= 32);

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= *index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);

	seq_printf(tlb, "%2lu: %c %c %02x %05x %05x %o %o %c %c %c %c\n",
		   *index,
		   (tlbehi & 0x200)?'1':'0',
		   (tlbelo & 0x100)?'1':'0',
		   (tlbehi & 0xff),
		   (tlbehi >> 12), (tlbelo >> 12),
		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
		   (tlbelo & 0x200)?'1':'0',
		   (tlbelo & 0x080)?'1':'0',
		   (tlbelo & 0x001)?'1':'0',
		   (tlbelo & 0x002)?'1':'0');

	return 0;
}

static struct seq_operations tlb_ops = {
	.start	= tlb_start,
	.next	= tlb_next,
	.stop	= tlb_stop,
	.show	= tlb_show,
};

static int tlb_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tlb_ops);
}

static struct file_operations proc_tlb_operations = {
	.open		= tlb_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proctlb_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("tlb", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_tlb_operations;
	return 0;
}
late_initcall(proctlb_init);
#endif /* CONFIG_PROC_FS */