/* Copyright (C) 2004 Mips Technologies, Inc */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>

/*
 * The SMTC kernel needs to manipulate low-level CPU interrupt masks
 * in do_IRQ. These masks are passed in via setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];
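
/*
 * Paired lock/unlock macros for short critical sections.  The MT_PRA
 * variants disable multithreading on the local VPE (dmt/emt); the
 * CORE_PRA variants disable all VPEs on the core (dvpe/evpe).  Note
 * that both variants rely on the caller having declared "flags" and
 * "mtflags" local variables in the enclosing scope.
 */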
#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)

/*
 * Data structures purely associated with SMTC parallelism
 */

/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */
asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

/*
 * Clock interrupt "latch" buffers, per "CPU"
 */
unsigned int ipi_timer_latch[NR_CPUS];

/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */
#define IPIBUF_PER_CPU 4

static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;

/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
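
/*
 * Example kernel command-line usage of the overrides above
 * (illustrative values):
 *
 *	ipibufs=32 nostlb asidmask=0x3f
 */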

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}

__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */

void __init sanitize_tlb_entries(void)
{
	printk("Deprecated sanitize_tlb_entries() invoked\n");
}

/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~MVPCONTROL_VPC);
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC);
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Set up kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}

/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * phys_cpu_present_map and the logical/physical mappings.
 */

int __init mipsmt_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);

	/* One of those TCs is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPUs are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */
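
/*
 * Bring a halted TC into a clean state for use as a Linux "CPU".
 * TCStatus bit meanings (per the MIPS MT ASE, noted here for
 * readability): TKSU selects kernel/user mode, DA enables dynamic
 * TC allocation, IXMT makes the TC interrupt-exempt, and A marks
 * it activated.  smtc_tc_setup() clears the first three and sets A.
 */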
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	write_tc_c0_tccontext(0);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
}

void mipsmt_prepare_cpus(void)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
		ipi_timer_latch[i] = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC);

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	tcpervpe = ntc / nvpe;
	slop = ntc % nvpe;	/* Residual TCs, < NVPE */
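
	/*
	 * Illustrative example of the distribution below: with ntc = 5
	 * and nvpe = 2, tcpervpe = 2 and slop = 1, so VPE 0 is given
	 * three TCs and VPE 1 two.
	 */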

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code. Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (slop) {
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
			slop--;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count());
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, phys_cpu_present_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC);

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}
519
520/*
521 * Setup the PC, SP, and GP of a secondary processor and start it
522 * running!
523 * smp_bootstrap is the place to resume from
524 * __KSTK_TOS(idle) is apparently the stack pointer
525 * (unsigned long)idle->thread_info the gp
526 *
527 */
Ralf Baechlee119d492007-07-28 00:54:32 +0100528void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
Ralf Baechle41c594a2006-04-05 09:45:45 +0100529{
530 extern u32 kernelsp[NR_CPUS];
531 long flags;
532 int mtflags;
533
534 LOCK_MT_PRA();
535 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
536 dvpe();
537 }
538 settc(cpu_data[cpu].tc_id);
539
540 /* pc */
541 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
542
543 /* stack pointer */
544 kernelsp[cpu] = __KSTK_TOS(idle);
545 write_tc_gpr_sp(__KSTK_TOS(idle));
546
547 /* global pointer */
Roman Zippelc9f4f062007-05-09 02:35:16 -0700548 write_tc_gpr_gp((unsigned long)task_thread_info(idle));
Ralf Baechle41c594a2006-04-05 09:45:45 +0100549
550 smtc_status |= SMTC_MTC_ACTIVE;
551 write_tc_c0_tchalt(0);
552 if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
553 evpe(EVPE_ENABLE);
554 }
555 UNLOCK_MT_PRA();
556}
557

void smtc_init_secondary(void)
{
	/*
	 * Start timer on secondary VPEs if necessary.
	 * plat_timer_setup has already been invoked by init/main
	 * on the "boot" TC. Like the per_cpu_trap_init() hack, this
	 * assumes that SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE)
	    != cpu_data[smp_processor_id() - 1].vpe_id)) {
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
	}

	local_irq_enable();
}

void smtc_smp_finish(void)
{
	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * The SMTC kernel needs to manipulate low-level CPU interrupt masks
 * in do_IRQ. These masks are passed in via setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}
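
/*
 * Usage sketch (this mirrors the IPI registration performed by
 * setup_cross_vpe_interrupts() later in this file):
 *
 *	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, 0x100 << MIPS_CPU_IPI_IRQ);
 *
 * where the hwmask argument is the Status.IM bit that do_IRQ must
 * manage for this interrupt.
 */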

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}

void smtc_forward_irq(unsigned int irq)
{
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUs, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests. Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */

	target = first_cpu(irq_desc[irq].affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question. Otherwise we have a nasty problem with interrupt
	 * mask management. This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS) {
		do_IRQ_no_affinity(irq);
		return;
	}

	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code. Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
static __inline__ int atomic_postincrement(unsigned int *pv)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	__WEAK_LLSC_MB
	: "=&r" (result), "=&r" (temp), "=m" (*pv)
	: "m" (*pv)
	: "memory");

	return result;
}
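
/*
 * (As the comment above notes, atomic_postincrement() is semantically
 * atomic_add_return(1, pv) - 1; it is open-coded to save the extra
 * decrement.)
 */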

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	long flags;
	int mtflags;

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * Spin-waiting here can deadlock,
			 * so we queue the message for the target TC.
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			/* Try to reduce redundant timer interrupt messages */
			if (type == SMTC_CLOCK_TICK) {
				if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
					smtc_ipi_nq(&freeIPIq, pipi);
					return;
				}
			}
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}
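
/*
 * To summarize the delivery policy above: a cross-VPE target always
 * gets a queued message plus a SW1 "doorbell" in its VPE's Cause
 * register; a same-VPE target is halted and either has the interrupt
 * posted directly (IXMT clear) or the message queued (IXMT set), with
 * redundant SMTC_CLOCK_TICK messages coalesced via ipi_timer_latch.
 */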

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
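	/*
	 * A note on the magic numbers below: 0x42000020 under the mask
	 * 0xfe00003f is the MIPS32 WAIT opcode, and the high-bit test
	 * first checks that tcrestart is a kernel-segment address that
	 * is safe to dereference.
	 */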
	/* If TCRestart indicates a WAIT instruction, advance the PC */
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

void ipi_decode(struct smtc_ipi *pipi)
{
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int dest_copy = pipi->dest;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
		/* Invoke Clock "Interrupt" */
		ipi_timer_latch[dest_copy] = 0;
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
		clock_hang_reported[dest_copy] = 0;
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
		local_timer_interrupt(0, NULL);
		irq_exit();
		break;
	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	case IRQ_AFFINITY_IPI:
		/*
		 * Accept a "forwarded" interrupt that was initially
		 * taken by a TC who doesn't have affinity for the IRQ.
		 */
		do_IRQ_no_affinity((int)arg_copy);
		break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

void deferred_smtc_ipi(void)
{
	struct smtc_ipi *pipi;
	unsigned long flags;
/* DEBUG */
	int q = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 */
	if (IPIQ[q].head != NULL) {
		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
			/* ipi_decode() should be called with interrupts off */
			local_irq_save(flags);
			ipi_decode(pipi);
			local_irq_restore(flags);
		}
	}
}

/*
 * Send clock tick to all TCs except the one executing the function
 */

void smtc_timer_broadcast(void)
{
	int cpu;
	int myTC = cpu_data[smp_processor_id()].tc_id;
	int myVPE = cpu_data[smp_processor_id()].vpe_id;

	smtc_cpu_stats[smp_processor_id()].timerints++;

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == myVPE &&
		    cpu_data[cpu].tc_id != myTC)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell"
 * hardware interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED | IRQF_PERCPU,
	.name		= "SMTC_IPI"
};
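
/*
 * IRQF_PERCPU pairs with the handle_percpu_irq flow installed by
 * setup_cross_vpe_interrupts() below, marking the IPI as a per-CPU
 * interrupt rather than one subject to normal masking and affinity
 * handling.
 */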

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 *
 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
 * called with interrupts disabled. We do rely on interrupts being disabled
 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
 * result in a recursive call to raw_local_irq_restore().
 */

static void __smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs. This is subtle.
	 * If we use the smtc_ipi_qdepth() macro, we'll get an
	 * exact number - but we'll also disable interrupts
	 * and create a window of failure where a new IPI gets
	 * queued after we test the depth but before we re-enable
	 * interrupts. So long as IXMT never gets set, however,
	 * we should be OK: If we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.
	 */
	if (IPIQ[cpu].depth > 0) {
		while (1) {
			struct smtc_ipi_q *q = &IPIQ[cpu];
			struct smtc_ipi *pipi;
			extern void self_ipi(struct smtc_ipi *);

			spin_lock(&q->lock);
			pipi = __smtc_ipi_dq(q);
			spin_unlock(&q->lock);
			if (!pipi)
				break;

			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

void smtc_ipi_replay(void)
{
	raw_local_irq_disable();
	__smtc_ipi_replay();
}

EXPORT_SYMBOL(smtc_ipi_replay);

void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	unsigned long flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768];	/* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
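	/* (0x400 in the next two statements is the TCSTATUS_IXMT bit.) */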
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	/*
	 * Now that we limit outstanding timer IPIs, check for hung TC
	 */
	for (tc = 0; tc < NR_CPUS; tc++) {
		/* Don't check ourself - we'll dequeue IPIs just below */
		if ((tc != smp_processor_id()) &&
		    ipi_timer_latch[tc] > timerq_limit) {
			if (clock_hang_reported[tc] == 0) {
				pdb_msg += sprintf(pdb_msg,
					"TC %d looks hung with timer latch at %d\n",
					tc, ipi_timer_latch[tc]);
				clock_hang_reported[tc]++;
			}
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/*
	 * Replay any accumulated deferred IPIs. If "Instant Replay"
	 * is in use, there should never be any.
	 */
#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	{
		unsigned long flags;

		local_irq_save(flags);
		__smtc_ipi_replay();
		local_irq_restore(flags);
	}
#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}

void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("Timer IPI Backlogs:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %d\n", i, ipi_timer_latch[i]);
	}
	printk("%d Recoveries of \"stolen\" FPU\n",
		atomic_read(&smtc_fpu_recoveries));
}

/*
 * TLB management routines special to SMTC
 */
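
/*
 * Pick a new ASID for "cpu" while avoiding any ASID still live in
 * another TC sharing this TLB: each time the generation rolls over,
 * the loop below walks the other TCs, records the ASIDs found in
 * their TCStatus registers into smtc_live_asid[], and keeps
 * advancing while the candidate ASID is marked live.
 */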
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even though most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

	do {
		if (!((asid += ASID_INC) & ASID_MASK)) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for (i = 0; i < num_online_cpus(); i++) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for (i = 0; i < num_online_cpus(); i++) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all. This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}