/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>

/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */

#define MIPS_CPU_IPI_IRQ 1
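
/*
 * MIPS_CPU_IPI_IRQ 1 selects CPU software interrupt 1: smtc_send_ipi()
 * raises it on a target VPE by setting C_SW1 in that VPE's Cause register,
 * and setup_cross_vpe_interrupts() registers a handler for it with the
 * corresponding IM bit (0x100 << MIPS_CPU_IPI_IRQ).
 */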

#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
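
/*
 * NB: the PRA lock/unlock macros above expand in place and rely on the
 * enclosing function declaring local "flags" and "mtflags" variables to
 * hold the saved interrupt and MT/VPE state (see smtc_send_ipi() and
 * ipi_interrupt() below).
 */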

/*
 * Data structures purely associated with SMTC parallelism
 */


/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];

/*
 * Number of Inter-Processor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4
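
/*
 * The free IPI message pool is sized at NR_CPUS * IPIBUF_PER_CPU entries
 * unless overridden with the "ipibufs=" boot argument; see the pool setup
 * in mipsmt_prepare_cpus() below.
 */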

static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;


/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(void);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int vpelimit = 0;
static int tclimit = 0;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

static int __init maxvpes(char *str)
{
	get_option(&str, &vpelimit);
	return 1;
}

static int __init maxtcs(char *str)
{
	get_option(&str, &tclimit);
	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("maxvpes=", maxvpes);
__setup("maxtcs=", maxtcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}


__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}


/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC);
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}


/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */

int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TCs is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}


void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	if (nvpe > 1)
		setup_cross_vpe_interrupts();

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}


/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info is the gp
 *
 */
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)idle->thread_info);

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC. Like the per_cpu_trap_init() hack, this assumes
	 * that the SMTC init code assigns TCs consecutively and in ascending
	 * order across the available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
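
/*
 * The hwmask value appears to be the Status/Cause IM bit pattern for the
 * IRQ; for example, the cross-VPE IPI below is registered with
 * setup_irq_smtc(cpu_ipi_irq, &irq_ipi, 0x100 << MIPS_CPU_IPI_IRQ).
 */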

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

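/*
 * In code terms (see smtc_send_ipi() below): a target TC on another VPE has
 * its message queued on IPIQ[cpu] and a SW1 interrupt asserted on that VPE;
 * a target on the same VPE is halted and, unless it is IXMT, has the IPI
 * injected directly via post_direct_ipi(); an IXMT target gets the message
 * queued for deferred delivery.
 */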
static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"
	"	addu	%1, %0, 1	\n"
	"	sc	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	"	sync			\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
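
/*
 * Used by smtc_send_ipi() on ipi_timer_latch[cpu] to coalesce clock-tick
 * IPIs to an IXMT target: only a sender that sees a previous latch value
 * of zero actually queues a message, and ipi_decode() zeroes the latch
 * when the tick is eventually delivered.
 */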

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
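	/*
	 * Note on the magic numbers below: 0x42000020 under mask 0xfe00003f
	 * is the encoding of the MIPS WAIT instruction, and the 0x80000000
	 * test presumably ensures the restart address is a kernel-segment
	 * address that is safe to dereference here.
	 */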
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}


static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR]++;
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		irq_exit();
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
/* DEBUG */
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}

/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(int vpe)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi;

static void setup_cross_vpe_interrupts(void)
{
	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	irq_ipi.handler = ipi_interrupt;
	irq_ipi.flags = IRQF_DISABLED;
	irq_ipi.name = "SMTC_IPI";

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

void smtc_ipi_replay(void)
{
	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[smp_processor_id()].depth > 0) {
		struct smtc_ipi *pipi;
		extern void self_ipi(struct smtc_ipi *);

		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
			self_ipi(pipi);
			smtc_cpu_stats[smp_processor_id()].selfipis++;
		}
	}
}

EXPORT_SYMBOL(smtc_ipi_replay);

void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	int flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768];	/* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = cpu_data[smp_processor_id()].vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourselves - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	smtc_ipi_replay();
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}

void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
		atomic_read(&smtc_fpu_recoveries));
}


/*
 * TLB management routines special to SMTC
 */

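/*
 * Pick a new ASID for "cpu": advance the ASID cache and skip any value
 * still recorded as live in smtc_live_asid[] for a TC sharing this TLB.
 * The live set is refreshed on ASID wraparound by briefly halting each
 * such TC and reading the ASID field of its TCStatus.
 */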
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even though most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}