/*
 * AVR32 TLB operations
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>

#include <asm/mmu_context.h>

#define _TLBEHI_I	0x100

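/*
 * Read one DTLB entry back into TLBEHI/TLBELO and print its fields.
 * The low MMUCR control bits (mask 0x13) are preserved while the entry
 * selector -- the replacement-pointer field at bit 14, going by the
 * shifts used throughout this file -- is pointed at @index before
 * issuing "tlbr".  Interrupts stay disabled and the clobbered registers
 * are restored so the dump does not disturb live TLB state.
 */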
void show_dtlb_entry(unsigned int index)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	printk("%2u: %c %c %02x   %05x %05x %o  %o  %c %c %c %c\n",
	       index,
	       (tlbehi & 0x200)?'1':'0',
	       (tlbelo & 0x100)?'1':'0',
	       (tlbehi & 0xff),
	       (tlbehi >> 12), (tlbelo >> 12),
	       (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
	       (tlbelo & 0x200)?'1':'0',
	       (tlbelo & 0x080)?'1':'0',
	       (tlbelo & 0x001)?'1':'0',
	       (tlbelo & 0x002)?'1':'0');

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);
}

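/* Dump all 32 DTLB entries to the kernel log, one line per entry. */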
void dump_dtlb(void)
{
	unsigned int i;

	printk("ID  V G ASID VPN   PFN   AP SZ C B W D\n");
	for (i = 0; i < 32; i++)
		show_dtlb_entry(i);
}

static unsigned long last_mmucr;

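/*
 * Point the TLB replacement pointer at the entry to be (re)written.
 *
 * "tlbs" searches for the address currently loaded in TLBEHI.  On a hit
 * the replacement pointer already refers to the matching entry and is
 * simply reused.  On a miss (MMUCR_N set) a victim is picked from
 * TLBARLO, where a set bit appears to mark an entry as not recently
 * accessed (see __flush_tlb_page() below): bit 31 corresponds to entry
 * 0, so "32 - fls(tlbar)" yields the lowest-numbered such entry.  Once
 * every entry has been accessed, TLBARLO is reset to all ones and entry
 * 0 is used.  The MMUCR value that ends up in effect is recorded in
 * last_mmucr.
 */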
static inline void set_replacement_pointer(unsigned shift)
{
	unsigned long mmucr, mmucr_save;

	mmucr = mmucr_save = sysreg_read(MMUCR);

	/* Does this mapping already exist? */
	__asm__ __volatile__(
		"	tlbs\n"
		"	mfsr %0, %1"
		: "=r"(mmucr)
		: "i"(SYSREG_MMUCR));

	if (mmucr & SYSREG_BIT(MMUCR_N)) {
		/* Not found -- pick a not-recently-accessed entry */
		unsigned long rp;
		unsigned long tlbar = sysreg_read(TLBARLO);

		rp = 32 - fls(tlbar);
		if (rp == 32) {
			rp = 0;
			sysreg_write(TLBARLO, -1L);
		}

		mmucr &= 0x13;
		mmucr |= (rp << shift);

		sysreg_write(MMUCR, mmucr);
	}

	last_mmucr = mmucr;
}

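/*
 * Install a single DTLB entry for @address in the context @asid.
 * TLBEHI is loaded with the VPN, the valid bit and the ASID, a target
 * entry is selected (14 being the DTLB replacement-pointer shift used
 * elsewhere in this file), TLBELO gets the hardware-relevant PTE bits,
 * and "tlbw" commits the entry.
 */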
static void update_dtlb(unsigned long address, pte_t pte, unsigned long asid)
{
	unsigned long vpn;

	vpn = (address & MMU_VPN_MASK) | _TLBEHI_VALID | asid;
	sysreg_write(TLBEHI, vpn);
	cpu_sync_pipeline();

	set_replacement_pointer(14);

	sysreg_write(TLBELO, pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK);

	/* Let's go */
	asm volatile("nop\n\ttlbw" : : : "memory");
	cpu_sync_pipeline();
}

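/*
 * Called by the generic MM code after a PTE has been established;
 * pre-loads the new translation into the DTLB so the faulting access
 * does not immediately miss again.
 */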
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;

	/* ptrace may call this routine */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	update_dtlb(address, pte, get_asid());
	local_irq_restore(flags);
}

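/*
 * Invalidate the DTLB entry for @page in context @asid, if one exists.
 * "tlbs" probes for the (ASID, VPN) pair loaded into TLBEHI; on a hit
 * the valid bit is cleared, the entry is flagged as not accessed in
 * TLBARLO, and "tlbw" writes the now-invalid entry back.
 */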
void __flush_tlb_page(unsigned long asid, unsigned long page)
{
	unsigned long mmucr, tlbehi;

	page |= asid;
	sysreg_write(TLBEHI, page);
	cpu_sync_pipeline();
	asm volatile("tlbs");
	mmucr = sysreg_read(MMUCR);

	if (!(mmucr & SYSREG_BIT(MMUCR_N))) {
		unsigned long tlbarlo;
		unsigned long entry;

		/* Clear the "valid" bit */
		tlbehi = sysreg_read(TLBEHI);
		tlbehi &= ~_TLBEHI_VALID;
		sysreg_write(TLBEHI, tlbehi);
		cpu_sync_pipeline();

		/* mark the entry as "not accessed" */
		entry = (mmucr >> 14) & 0x3f;
		tlbarlo = sysreg_read(TLBARLO);
		tlbarlo |= (0x80000000 >> entry);
		sysreg_write(TLBARLO, tlbarlo);

		/* update the entry with valid bit clear */
		asm volatile("tlbw");
		cpu_sync_pipeline();
	}
}

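/*
 * If the page belongs to an mm other than the current one, temporarily
 * switch to that mm's ASID so the probe in __flush_tlb_page() matches
 * the right context, then restore the previous ASID.
 */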
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags, asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}

		__flush_tlb_page(asid, page);

		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

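/*
 * For ranges spanning more than a quarter of the DTLB it is cheaper to
 * drop the whole context (forcing a new ASID to be allocated) than to
 * probe page by page; smaller ranges are flushed one page at a time,
 * switching ASIDs as in flush_tlb_page().
 */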
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}

			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

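/*
 * Kernel-range variant of flush_tlb_range(): uses init_mm's ASID and
 * falls back to flush_tlb_all() when the range covers more than a
 * quarter of the DTLB.
 */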
/*
 * TODO: If this is only called for addresses > TASK_SIZE, we can probably
 * skip the ASID stuff and just use the Global bit...
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_DTLB_ENTRIES / 4)) { /* Too many entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Invalidate all TLB entries of this process by getting a new ASID */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

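/* Invalidate the entire TLB by setting the MMUCR invalidate (I) bit. */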
void flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	sysreg_write(MMUCR, sysreg_read(MMUCR) | SYSREG_BIT(MMUCR_I));
	local_irq_restore(flags);
}

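/*
 * /proc/tlb: dump the 32 DTLB entries through the seq_file interface,
 * in the same format as dump_dtlb().
 */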
#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>

static void *tlb_start(struct seq_file *tlb, loff_t *pos)
{
	static unsigned long tlb_index;

	if (*pos >= 32)
		return NULL;

	tlb_index = 0;
	return &tlb_index;
}

static void *tlb_next(struct seq_file *tlb, void *v, loff_t *pos)
{
	unsigned long *index = v;

	if (*index >= 31)
		return NULL;

	++*pos;
	++*index;
	return index;
}

static void tlb_stop(struct seq_file *tlb, void *v)
{

}

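/*
 * Read and decode one DTLB entry, using the same register sequence as
 * show_dtlb_entry() but printing into the seq_file.
 */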
static int tlb_show(struct seq_file *tlb, void *v)
{
	unsigned int tlbehi, tlbehi_save, tlbelo, mmucr, mmucr_save;
	unsigned long flags;
	unsigned long *index = v;

	if (*index == 0)
		seq_puts(tlb, "ID  V G ASID VPN   PFN   AP SZ C B W D\n");

	BUG_ON(*index >= 32);

	local_irq_save(flags);
	mmucr_save = sysreg_read(MMUCR);
	tlbehi_save = sysreg_read(TLBEHI);
	mmucr = mmucr_save & 0x13;
	mmucr |= *index << 14;
	sysreg_write(MMUCR, mmucr);

	asm volatile("tlbr" : : : "memory");
	cpu_sync_pipeline();

	tlbehi = sysreg_read(TLBEHI);
	tlbelo = sysreg_read(TLBELO);

	sysreg_write(MMUCR, mmucr_save);
	sysreg_write(TLBEHI, tlbehi_save);
	cpu_sync_pipeline();
	local_irq_restore(flags);

	seq_printf(tlb, "%2lu: %c %c %02x   %05x %05x %o  %o  %c %c %c %c\n",
		   *index,
		   (tlbehi & 0x200)?'1':'0',
		   (tlbelo & 0x100)?'1':'0',
		   (tlbehi & 0xff),
		   (tlbehi >> 12), (tlbelo >> 12),
		   (tlbelo >> 4) & 7, (tlbelo >> 2) & 3,
		   (tlbelo & 0x200)?'1':'0',
		   (tlbelo & 0x080)?'1':'0',
		   (tlbelo & 0x001)?'1':'0',
		   (tlbelo & 0x002)?'1':'0');

	return 0;
}

static struct seq_operations tlb_ops = {
	.start		= tlb_start,
	.next		= tlb_next,
	.stop		= tlb_stop,
	.show		= tlb_show,
};

static int tlb_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tlb_ops);
}

static const struct file_operations proc_tlb_operations = {
	.open		= tlb_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proctlb_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("tlb", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_tlb_operations;
	return 0;
}
late_initcall(proctlb_init);
#endif /* CONFIG_PROC_FS */