Konrad Eisele | 5213a78 | 2009-08-17 00:13:29 +0000 | [diff] [blame] | 1 | /* |
| 2 | * linux/arch/sparc/mm/leon_m.c |
| 3 | * |
| 4 | * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research |
| 5 | * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB |
| 6 | * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB |
| 7 | * |
| 8 | * do srmmu probe in software |
| 9 | * |
| 10 | */ |
| 11 | |
| 12 | #include <linux/kernel.h> |
| 13 | #include <linux/mm.h> |
| 14 | #include <asm/asi.h> |
| 15 | #include <asm/leon.h> |
| 16 | #include <asm/tlbflush.h> |
| 17 | |
Sam Ravnborg | accf032 | 2012-05-19 20:02:49 +0000 | [diff] [blame^] | 18 | #include "srmmu.h" |
| 19 | |
/* Nonzero => flush caches on every context switch; set by init_leon()
 * from leon_flush_needed(). Defaults to flushing until probed. */
int leon_flush_during_switch = 1;
/* Nonzero enables verbose printk tracing inside srmmu_swprobe(). */
int srmmu_swprobe_trace;
| 22 | |
| 23 | unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr) |
| 24 | { |
| 25 | |
| 26 | unsigned int ctxtbl; |
| 27 | unsigned int pgd, pmd, ped; |
| 28 | unsigned int ptr; |
| 29 | unsigned int lvl, pte, paddrbase; |
| 30 | unsigned int ctx; |
| 31 | unsigned int paddr_calc; |
| 32 | |
| 33 | paddrbase = 0; |
| 34 | |
| 35 | if (srmmu_swprobe_trace) |
| 36 | printk(KERN_INFO "swprobe: trace on\n"); |
| 37 | |
| 38 | ctxtbl = srmmu_get_ctable_ptr(); |
| 39 | if (!(ctxtbl)) { |
| 40 | if (srmmu_swprobe_trace) |
| 41 | printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n"); |
| 42 | return 0; |
| 43 | } |
| 44 | if (!_pfn_valid(PFN(ctxtbl))) { |
| 45 | if (srmmu_swprobe_trace) |
| 46 | printk(KERN_INFO |
| 47 | "swprobe: !_pfn_valid(%x)=>0\n", |
| 48 | PFN(ctxtbl)); |
| 49 | return 0; |
| 50 | } |
| 51 | |
| 52 | ctx = srmmu_get_context(); |
| 53 | if (srmmu_swprobe_trace) |
| 54 | printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx); |
| 55 | |
| 56 | pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4)); |
| 57 | |
| 58 | if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { |
| 59 | if (srmmu_swprobe_trace) |
| 60 | printk(KERN_INFO "swprobe: pgd is entry level 3\n"); |
| 61 | lvl = 3; |
| 62 | pte = pgd; |
| 63 | paddrbase = pgd & _SRMMU_PTE_PMASK_LEON; |
| 64 | goto ready; |
| 65 | } |
| 66 | if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { |
| 67 | if (srmmu_swprobe_trace) |
| 68 | printk(KERN_INFO "swprobe: pgd is invalid => 0\n"); |
| 69 | return 0; |
| 70 | } |
| 71 | |
| 72 | if (srmmu_swprobe_trace) |
| 73 | printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd); |
| 74 | |
| 75 | ptr = (pgd & SRMMU_PTD_PMASK) << 4; |
| 76 | ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4); |
| 77 | if (!_pfn_valid(PFN(ptr))) |
| 78 | return 0; |
| 79 | |
| 80 | pmd = LEON_BYPASS_LOAD_PA(ptr); |
| 81 | if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { |
| 82 | if (srmmu_swprobe_trace) |
| 83 | printk(KERN_INFO "swprobe: pmd is entry level 2\n"); |
| 84 | lvl = 2; |
| 85 | pte = pmd; |
| 86 | paddrbase = pmd & _SRMMU_PTE_PMASK_LEON; |
| 87 | goto ready; |
| 88 | } |
| 89 | if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { |
| 90 | if (srmmu_swprobe_trace) |
| 91 | printk(KERN_INFO "swprobe: pmd is invalid => 0\n"); |
| 92 | return 0; |
| 93 | } |
| 94 | |
| 95 | if (srmmu_swprobe_trace) |
| 96 | printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd); |
| 97 | |
| 98 | ptr = (pmd & SRMMU_PTD_PMASK) << 4; |
| 99 | ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4); |
| 100 | if (!_pfn_valid(PFN(ptr))) { |
| 101 | if (srmmu_swprobe_trace) |
| 102 | printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n", |
| 103 | PFN(ptr)); |
| 104 | return 0; |
| 105 | } |
| 106 | |
| 107 | ped = LEON_BYPASS_LOAD_PA(ptr); |
| 108 | |
| 109 | if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { |
| 110 | if (srmmu_swprobe_trace) |
| 111 | printk(KERN_INFO "swprobe: ped is entry level 1\n"); |
| 112 | lvl = 1; |
| 113 | pte = ped; |
| 114 | paddrbase = ped & _SRMMU_PTE_PMASK_LEON; |
| 115 | goto ready; |
| 116 | } |
| 117 | if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { |
| 118 | if (srmmu_swprobe_trace) |
| 119 | printk(KERN_INFO "swprobe: ped is invalid => 0\n"); |
| 120 | return 0; |
| 121 | } |
| 122 | |
| 123 | if (srmmu_swprobe_trace) |
| 124 | printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped); |
| 125 | |
| 126 | ptr = (ped & SRMMU_PTD_PMASK) << 4; |
| 127 | ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4); |
| 128 | if (!_pfn_valid(PFN(ptr))) |
| 129 | return 0; |
| 130 | |
| 131 | ptr = LEON_BYPASS_LOAD_PA(ptr); |
| 132 | if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { |
| 133 | if (srmmu_swprobe_trace) |
| 134 | printk(KERN_INFO "swprobe: ptr is entry level 0\n"); |
| 135 | lvl = 0; |
| 136 | pte = ptr; |
| 137 | paddrbase = ptr & _SRMMU_PTE_PMASK_LEON; |
| 138 | goto ready; |
| 139 | } |
| 140 | if (srmmu_swprobe_trace) |
| 141 | printk(KERN_INFO "swprobe: ptr is invalid => 0\n"); |
| 142 | return 0; |
| 143 | |
| 144 | ready: |
| 145 | switch (lvl) { |
| 146 | case 0: |
| 147 | paddr_calc = |
| 148 | (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4); |
| 149 | break; |
| 150 | case 1: |
| 151 | paddr_calc = |
| 152 | (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4); |
| 153 | break; |
| 154 | case 2: |
| 155 | paddr_calc = |
| 156 | (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4); |
| 157 | break; |
| 158 | default: |
| 159 | case 3: |
| 160 | paddr_calc = vaddr; |
| 161 | break; |
| 162 | } |
| 163 | if (srmmu_swprobe_trace) |
| 164 | printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); |
| 165 | if (paddr) |
| 166 | *paddr = paddr_calc; |
Daniel Hellstrom | f22ed71 | 2011-09-08 03:11:15 +0000 | [diff] [blame] | 167 | return pte; |
Konrad Eisele | 5213a78 | 2009-08-17 00:13:29 +0000 | [diff] [blame] | 168 | } |
| 169 | |
/* Flush the entire instruction cache ("flush" acts as iflush on LEON). */
void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush "); /*iflush*/
}
| 174 | |
/* Flush the entire data cache via a store to the LEON dflush ASI. */
void leon_flush_dcache_all(void)
{
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
| 180 | |
/*
 * Cache flush for a single page mapping: the icache is flushed only for
 * executable vmas; the dcache is always flushed.  @page itself is not
 * used — LEON flushes the whole cache rather than a single page.
 */
void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_flags & VM_EXEC)
		leon_flush_icache_all();
	leon_flush_dcache_all();
}
| 187 | |
/*
 * Flush both the instruction cache and the data cache.  Reuses the
 * dedicated per-cache flush helpers, which execute the same iflush
 * instruction and ASI_LEON_DFLUSH store the original open-coded here.
 */
void leon_flush_cache_all(void)
{
	leon_flush_icache_all();
	leon_flush_dcache_all();
}
| 194 | |
/*
 * Flush the entire TLB.  The (virtually tagged) caches are flushed
 * first; the store to the MMU-flush ASI at address 0x400 then performs
 * the TLB flush (NOTE(review): 0x400 appears to be the SRMMU
 * "flush entire" probe type in bits 8-11 — confirm against the SRMMU
 * spec).
 */
void leon_flush_tlb_all(void)
{
	leon_flush_cache_all();
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
			     "i"(ASI_LEON_MMUFLUSH) : "memory");
}
| 201 | |
/*
 * Read the LEON3 cache control registers into @regs: CCR from cache-ASI
 * offset 0x0, ICCR from 0x8 and DCCR from 0xC.  A NULL @regs is a no-op.
 * %g1 is used as scratch for the offsets and declared clobbered.
 */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
	unsigned long ccr, iccr, dccr;

	if (!regs)
		return;
	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
			     "mov 0x08, %%g1\n\t"
			     "lda [%%g1] %3, %1\n\t"
			     "mov 0x0c, %%g1\n\t"
			     "lda [%%g1] %3, %2\n\t"
			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
			       /* output */
			     : "i"(ASI_LEON_CACHEREGS)	/* input */
			     : "g1"	/* clobber list */
	    );
	regs->ccr = ccr;
	regs->iccr = iccr;
	regs->dccr = dccr;
}
| 224 | |
| 225 | /* Due to virtual cache we need to check cache configuration if |
| 226 | * it is possible to skip flushing in some cases. |
| 227 | * |
| 228 | * Leon2 and Leon3 differ in their way of telling cache information |
| 229 | * |
| 230 | */ |
Matthias Rosenfelder | 6d999da | 2011-06-13 07:04:05 +0000 | [diff] [blame] | 231 | int __init leon_flush_needed(void) |
Konrad Eisele | 5213a78 | 2009-08-17 00:13:29 +0000 | [diff] [blame] | 232 | { |
| 233 | int flush_needed = -1; |
| 234 | unsigned int ssize, sets; |
| 235 | char *setStr[4] = |
| 236 | { "direct mapped", "2-way associative", "3-way associative", |
| 237 | "4-way associative" |
| 238 | }; |
| 239 | /* leon 3 */ |
| 240 | struct leon3_cacheregs cregs; |
| 241 | leon3_getCacheRegs(&cregs); |
| 242 | sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24; |
| 243 | /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */ |
| 244 | ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20); |
| 245 | |
| 246 | printk(KERN_INFO "CACHE: %s cache, set size %dk\n", |
| 247 | sets > 3 ? "unknown" : setStr[sets], ssize); |
| 248 | if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) { |
| 249 | /* Set Size <= Page size ==> |
| 250 | flush on every context switch not needed. */ |
| 251 | flush_needed = 0; |
| 252 | printk(KERN_INFO "CACHE: not flushing on every context switch\n"); |
| 253 | } |
| 254 | return flush_needed; |
| 255 | } |
| 256 | |
/*
 * MMU context-switch hook: flush the whole TLB and, unless the cache
 * probe (leon_flush_needed()) determined it is unnecessary, the
 * virtually tagged caches as well.
 */
void leon_switch_mm(void)
{
	flush_tlb_mm((void *)0);
	if (leon_flush_during_switch)
		leon_flush_cache_all();
}
Sam Ravnborg | accf032 | 2012-05-19 20:02:49 +0000 | [diff] [blame^] | 263 | |
/* sparc32_cachetlb_ops hook: mm-wide cache flush (flushes everything). */
static void leon_flush_cache_mm(struct mm_struct *mm)
{
	leon_flush_cache_all();
}
| 268 | |
/* sparc32_cachetlb_ops hook: per-page cache flush. */
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	leon_flush_pcache_all(vma, page);
}
| 273 | |
/* sparc32_cachetlb_ops hook: range cache flush; LEON has no ranged
 * flush, so the whole cache is flushed regardless of start/end. */
static void leon_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	leon_flush_cache_all();
}
| 280 | |
/* sparc32_cachetlb_ops hook: mm-wide TLB flush (flushes the whole TLB). */
static void leon_flush_tlb_mm(struct mm_struct *mm)
{
	leon_flush_tlb_all();
}
| 285 | |
/* sparc32_cachetlb_ops hook: per-page TLB flush (whole TLB on LEON). */
static void leon_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long page)
{
	leon_flush_tlb_all();
}
| 291 | |
/* sparc32_cachetlb_ops hook: ranged TLB flush (whole TLB on LEON). */
static void leon_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	leon_flush_tlb_all();
}
| 298 | |
/* sparc32_cachetlb_ops hook: push page contents out to RAM; @page is
 * unused since the whole cache is flushed. */
static void leon_flush_page_to_ram(unsigned long page)
{
	leon_flush_cache_all();
}
| 303 | |
/* sparc32_cachetlb_ops hook: flush after signal-trampoline instructions
 * are written so the icache sees them. */
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
	leon_flush_cache_all();
}
| 308 | |
/* sparc32_cachetlb_ops hook: DMA page flush; only the data cache is
 * flushed (instruction cache is irrelevant for DMA coherency here). */
static void leon_flush_page_for_dma(unsigned long page)
{
	leon_flush_dcache_all();
}
| 313 | |
/* Per-CPU SRMMU poke hook; intentionally empty — nothing to do on LEON. */
void __init poke_leonsparc(void)
{
}
| 317 | |
/* LEON implementations of the generic sparc32 cache/TLB operations,
 * installed by init_leon(). */
static const struct sparc32_cachetlb_ops leon_ops = {
	.cache_all = leon_flush_cache_all,
	.cache_mm = leon_flush_cache_mm,
	.cache_page = leon_flush_cache_page,
	.cache_range = leon_flush_cache_range,
	.tlb_all = leon_flush_tlb_all,
	.tlb_mm = leon_flush_tlb_mm,
	.tlb_page = leon_flush_tlb_page,
	.tlb_range = leon_flush_tlb_range,
	.page_to_ram = leon_flush_page_to_ram,
	.sig_insns = leon_flush_sig_insns,
	.page_for_dma = leon_flush_page_for_dma,
};
| 331 | |
/*
 * Boot-time MMU setup for LEON: install the LEON cache/TLB operation
 * table and probe the cache configuration to decide whether caches
 * must be flushed on every context switch.
 */
void __init init_leon(void)
{
	srmmu_name = "LEON";
	sparc32_cachetlb_ops = &leon_ops;
	poke_srmmu = poke_leonsparc;

	leon_flush_during_switch = leon_flush_needed();
}