/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/security.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

#ifdef CONFIG_MIPS_MT_FPAFF

#include <linux/cpu.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

unsigned long mt_fpemul_threshold = 0;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical to the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched.c changes.
 */
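
/*
 * Call-path sketch (wiring assumed; it lives in the MIPS syscall
 * tables, not in this file): with CONFIG_MIPS_MT_FPAFF enabled the
 * affinity syscall slots would dispatch here rather than to the
 * generic entries, roughly:
 *
 *	#ifdef CONFIG_MIPS_MT_FPAFF
 *	sys	mipsmt_sys_sched_setaffinity	3
 *	sys	mipsmt_sys_sched_getaffinity	3
 *	#else
 *	sys	sys_sched_setaffinity		3
 *	sys	sys_sched_getaffinity		3
 *	#endif
 */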

/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_pid(pid) : current;
}

/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		unlock_cpu_hotplug();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	if ((p->thread.mflags & MF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed(p, effective_mask);
	} else {
		p->thread.mflags &= ~MF_FPUBOUND;
		retval = set_cpus_allowed(p, new_mask);
	}

out_unlock:
	put_task_struct(p);
	unlock_cpu_hotplug();
	return retval;
}

/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t mask;
	int retval;
	struct task_struct *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = 0;

	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);

out_unlock:
	read_unlock(&tasklist_lock);
	unlock_cpu_hotplug();
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}

#endif /* CONFIG_MIPS_MT_FPAFF */

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * The argument is taken to be a pre-call MVPControl value.
 */
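
/*
 * Illustrative call (assumed, based on the parameter's meaning): disable
 * multi-VPE execution first and hand the previous MVPControl value in:
 *
 *	mips_mt_regdump(dvpe());
 *
 * The dump then shows both the passed and the currently read MVPControl.
 */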

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk("   MVPControl Passed: %08lx\n", mvpctl);
	printk("   MVPControl Read: %08lx\n", vpflags);
	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk("  VPE %d\n", i);
				printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
				printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
				printk("   VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
				printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
				printk("   VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for (tc = 0; tc < ntc; tc++) {
		settc(tc);
		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself?  */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk("  TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk("  TC %d\n", tc);
		}
		printk("   TCStatus : %08lx\n", tcstatval);
		printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
		printk("   TCRestart : %08lx\n", read_tc_c0_tcrestart());
		printk("   TCHalt : %08lx\n", haltval);
		printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}

static int mt_opt_norps = 0;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7 = 0;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);
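
/*
 * Illustrative command line for the options above (values hypothetical):
 *
 *	rpsctl=0 nblsu=1 config7=0x80000400
 *
 * i.e. override the 34K return prediction stack and ALU/LSU sync
 * controls, and force CP0.Config7 outright to the given value; see
 * mips_mt_set_cpuoptions() below for how each option is applied.
 */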

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush = 0;
int mt_protdflush = 0;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);
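
/*
 * Illustrative command line (hypothetical values): booting with
 *
 *	protiflush protdflush niflush=2 ndflush=4
 *
 * makes I- and D-cache flushes single-threaded and repeats them 2 and 4
 * times respectively, as reported by mips_mt_set_cpuoptions() below.
 */
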
#ifdef CONFIG_MIPS_MT_FPAFF
static int fpaff_threshold = -1;

static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;
}

__setup("fpaff=", fpaff_thresh);
#endif /* CONFIG_MIPS_MT_FPAFF */

static unsigned int itc_base = 0;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);
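
/*
 * Illustrative use (address hypothetical): "itcbase=0x20000000" asks the
 * 34K-specific ITC setup in mips_mt_set_cpuoptions() below to map ITC
 * cells at that base.  The low 10 bits are masked off, so the address
 * is treated as 1KB-aligned.
 */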

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps)
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
		       mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile__("sync");
		write_c0_config7(nconfig7);
		ehb();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* FPU Use Factor empirically derived from experiments on 34K */
#define FPUSEFACTOR 333

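	/*
	 * Worked example (illustrative numbers): loops_per_jiffy/(500000/HZ)
	 * is the CPU's BogoMIPS rating, so the default below works out to
	 * FPUSEFACTOR * BogoMIPS / HZ.  For a core rated at about 266
	 * BogoMIPS with HZ = 100, that is 333 * 266 / 100, i.e. roughly
	 * 885 emulations before FPU affinity is applied to a task.
	 */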
	if (fpaff_threshold >= 0) {
		mt_fpemul_threshold = fpaff_threshold;
	} else {
		mt_fpemul_threshold =
			(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
	}
	printk("FPU Affinity set after %ld emulations\n",
	       mt_fpemul_threshold);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping.  This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
		       ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}

/*
 * How cache flushes are protected from concurrent execution depends
 * on the MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}