/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>

/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */

#define MIPS_CPU_IPI_IRQ 1

#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
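
/*
 * Note: the LOCK/UNLOCK macros above assume that the caller has declared
 * local "flags" and "mtflags" variables; see e.g. smtc_boot_secondary()
 * and smtc_send_ipi() below for typical usage.
 */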

/*
 * Data structures purely associated with SMTC parallelism
 */


/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */

unsigned int ipi_timer_latch[NR_CPUS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4
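/* Pool size defaults to NR_CPUS * IPIBUF_PER_CPU unless overridden by "ipibufs=" */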

static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;


/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}


__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}


/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC);
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}


/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */

int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TCs is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}


void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0; (vpe < nvpe) && (tc < ntc); vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}


/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)task_thread_info(idle) is apparently the gp
 *
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and in ascending order
	 * across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
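
/*
 * Example: setup_cross_vpe_interrupts() below registers the IPI handler
 * via setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)),
 * i.e. "hwmask" is the Status/Cause IM bit associated with the interrupt.
 */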

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"
	"	addu	%1, %0, 1	\n"
	"	sc	%1, %2		\n"
	"	beqz	%1, 1b		\n"
	"	sync			\n"
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
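
/*
 * atomic_postincrement() is used by smtc_send_ipi() below to coalesce
 * redundant SMTC_CLOCK_TICK messages via the ipi_timer_latch[] counters.
 */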

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in the kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}
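
/*
 * When the halted TC is released, it resumes at __smtc_ipi_vector, which
 * is expected to pick up the smtc_ipi descriptor and the ipi_decode()
 * address from the pad0[] slots filled in above and dispatch accordingly.
 */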

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}


static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		irq_exit();
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
/* DEBUG */
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}

/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(void)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED | IRQF_PERCPU,
	.name		= "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 *
 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
 * called with interrupts disabled. We do rely on interrupts being disabled
 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
 * result in a recursive call to raw_local_irq_restore().
 */

static void __smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[cpu].depth > 0) {
		while (1) {
			struct smtc_ipi_q *q = &IPIQ[cpu];
			struct smtc_ipi *pipi;
			extern void self_ipi(struct smtc_ipi *);

			spin_lock(&q->lock);
			pipi = __smtc_ipi_dq(q);
			spin_unlock(&q->lock);
			if (!pipi)
				break;

			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

void smtc_ipi_replay(void)
{
	raw_local_irq_disable();
	__smtc_ipi_replay();
}

EXPORT_SYMBOL(smtc_ipi_replay);

Ralf Baechle41c594a2006-04-05 09:45:45 +01001037void smtc_idle_loop_hook(void)
1038{
Ralf Baechlec68644d2007-02-26 20:46:34 +00001039#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
Ralf Baechle41c594a2006-04-05 09:45:45 +01001040 int im;
1041 int flags;
1042 int mtflags;
1043 int bit;
1044 int vpe;
1045 int tc;
1046 int hook_ntcs;
1047 /*
1048 * printk within DMT-protected regions can deadlock,
1049 * so buffer diagnostic messages for later output.
1050 */
1051 char *pdb_msg;
1052 char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1053
1054 if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1055 if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1056 int mvpconf0;
1057 /* Tedious stuff to just do once */
1058 mvpconf0 = read_c0_mvpconf0();
1059 hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1060 if (hook_ntcs > NR_CPUS)
1061 hook_ntcs = NR_CPUS;
1062 for (tc = 0; tc < hook_ntcs; tc++) {
1063 tcnoprog[tc] = 0;
1064 clock_hang_reported[tc] = 0;
1065 }
1066 for (vpe = 0; vpe < 2; vpe++)
1067 for (im = 0; im < 8; im++)
1068 imstuckcount[vpe][im] = 0;
1069 printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1070 atomic_set(&idle_hook_initialized, 1000);
1071 } else {
1072 /* Someone else is initializing in parallel - let 'em finish */
1073 while (atomic_read(&idle_hook_initialized) < 1000)
1074 ;
1075 }
1076 }
1077
1078 /* Have we stupidly left IXMT set somewhere? */
1079 if (read_c0_tcstatus() & 0x400) {
1080 write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1081 ehb();
1082 printk("Dangling IXMT in cpu_idle()\n");
1083 }
1084
1085 /* Have we stupidly left an IM bit turned off? */
1086#define IM_LIMIT 2000
1087 local_irq_save(flags);
1088 mtflags = dmt();
1089 pdb_msg = &id_ho_db_msg[0];
1090 im = read_c0_status();
Ralf Baechle8f8771a2007-07-10 17:32:56 +01001091 vpe = current_cpu_data.vpe_id;
Ralf Baechle41c594a2006-04-05 09:45:45 +01001092 for (bit = 0; bit < 8; bit++) {
1093 /*
1094 * In current prototype, I/O interrupts
1095 * are masked for VPE > 0
1096 */
1097 if (vpemask[vpe][bit]) {
1098 if (!(im & (0x100 << bit)))
1099 imstuckcount[vpe][bit]++;
1100 else
1101 imstuckcount[vpe][bit] = 0;
1102 if (imstuckcount[vpe][bit] > IM_LIMIT) {
1103 set_c0_status(0x100 << bit);
1104 ehb();
1105 imstuckcount[vpe][bit] = 0;
1106 pdb_msg += sprintf(pdb_msg,
1107 "Dangling IM %d fixed for VPE %d\n", bit,
1108 vpe);
1109 }
1110 }
1111 }
1112
1113 /*
1114 * Now that we limit outstanding timer IPIs, check for hung TC
1115 */
1116 for (tc = 0; tc < NR_CPUS; tc++) {
1117 /* Don't check ourself - we'll dequeue IPIs just below */
1118 if ((tc != smp_processor_id()) &&
1119 ipi_timer_latch[tc] > timerq_limit) {
1120 if (clock_hang_reported[tc] == 0) {
1121 pdb_msg += sprintf(pdb_msg,
1122 "TC %d looks hung with timer latch at %d\n",
1123 tc, ipi_timer_latch[tc]);
1124 clock_hang_reported[tc]++;
1125 }
1126 }
1127 }
1128 emt(mtflags);
1129 local_irq_restore(flags);
1130 if (pdb_msg != &id_ho_db_msg[0])
1131 printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
Ralf Baechlec68644d2007-02-26 20:46:34 +00001132#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
Ralf Baechle41c594a2006-04-05 09:45:45 +01001133
Ralf Baechleac8be952007-01-20 00:18:01 +00001134 /*
1135 * Replay any accumulated deferred IPIs. If "Instant Replay"
1136 * is in use, there should never be any.
1137 */
1138#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
Ralf Baechle8a1e97e2007-03-29 23:42:42 +01001139 {
1140 unsigned long flags;
1141
1142 local_irq_save(flags);
1143 __smtc_ipi_replay();
1144 local_irq_restore(flags);
1145 }
Ralf Baechleac8be952007-01-20 00:18:01 +00001146#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
Ralf Baechle41c594a2006-04-05 09:45:45 +01001147}

void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
		atomic_read(&smtc_fpu_recoveries));
}


/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even though most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}