/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2004 Mips Technologies, Inc
 * Copyright (C) 2008 Kevin D. Kissell
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_proc.h>

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];

#define LOCK_MT_PRA() \
	local_irq_save(flags); \
	mtflags = dmt()

#define UNLOCK_MT_PRA() \
	emt(mtflags); \
	local_irq_restore(flags)

#define LOCK_CORE_PRA() \
	local_irq_save(flags); \
	mtflags = dvpe()

#define UNLOCK_CORE_PRA() \
	evpe(mtflags); \
	local_irq_restore(flags)
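
/*
 * Note: the LOCK/UNLOCK macros above expand in place and rely on the
 * caller having declared local "flags" and "mtflags" variables.
 */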

/*
 * Data structures purely associated with SMTC parallelism
 */


/*
 * Table for tracking ASIDs whose lifetime is prolonged.
 */

asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];


/*
 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
 */

#define IPIBUF_PER_CPU 4

struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;


/* Forward declarations */

void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);

/* Global SMTC Status */

unsigned int smtc_status = 0;

/* Boot command line configuration overrides */

static int vpe0limit;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;

static int __init vpe0tcs(char *str)
{
	get_option(&str, &vpe0limit);

	return 1;
}

static int __init ipibufs(char *str)
{
	get_option(&str, &ipibuffers);
	return 1;
}

static int __init stlb_disable(char *s)
{
	nostlb = 1;
	return 1;
}

static int __init asidmask_set(char *str)
{
	get_option(&str, &asidmask);
	switch (asidmask) {
	case 0x1:
	case 0x3:
	case 0x7:
	case 0xf:
	case 0x1f:
	case 0x3f:
	case 0x7f:
	case 0xff:
		smtc_asid_mask = (unsigned long)asidmask;
		break;
	default:
		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
	}
	return 1;
}

__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);

#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG

static int hang_trig = 0;

static int __init hangtrig_enable(char *s)
{
	hang_trig = 1;
	return 1;
}


__setup("hangtrig", hangtrig_enable);

#define DEFAULT_BLOCKED_IPI_LIMIT 32

static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;

static int __init tintq(char *str)
{
	get_option(&str, &timerq_limit);
	return 1;
}

__setup("tintq=", tintq);

static int imstuckcount[2][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[2][8] = {
	{0, 0, 1, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = {0};
static int clock_hang_reported[NR_CPUS];

#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

/*
 * Configure shared TLB - VPC configuration bit must be set by caller
 */

static void smtc_configure_tlb(void)
{
	int i, tlbsiz, vpes;
	unsigned long mvpconf0;
	unsigned long config1val;

	/* Set up ASID preservation table */
	for (vpes = 0; vpes < MAX_SMTC_TLBS; vpes++) {
		for (i = 0; i < MAX_SMTC_ASIDS; i++) {
			smtc_live_asid[vpes][i] = 0;
		}
	}
	mvpconf0 = read_c0_mvpconf0();

	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
		/* If we have multiple VPEs, try to share the TLB */
		if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
			/*
			 * If TLB sizing is programmable, shared TLB
			 * size is the total available complement.
			 * Otherwise, we have to take the sum of all
			 * static VPE TLB entries.
			 */
			if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
					>> MVPCONF0_PTLBE_SHIFT)) == 0) {
				/*
				 * If there's more than one VPE, there had better
				 * be more than one TC, because we need one to bind
				 * to each VPE in turn to be able to read
				 * its configuration state!
				 */
				settc(1);
				/* Stop the TC from doing anything foolish */
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				/* No need to un-Halt - that happens later anyway */
				for (i = 0; i < vpes; i++) {
					write_tc_c0_tcbind(i);
					/*
					 * To be 100% sure we're really getting the right
					 * information, we exit the configuration state
					 * and do an IHB after each rebinding.
					 */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
					mips_ihb();
					/*
					 * Only count if the MMU Type indicated is TLB
					 */
					if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
						config1val = read_vpe_c0_config1();
						tlbsiz += ((config1val >> 25) & 0x3f) + 1;
					}

					/* Put core back in configuration state */
					write_c0_mvpcontrol(
						read_c0_mvpcontrol() | MVPCONTROL_VPC );
					mips_ihb();
				}
			}
			write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
			ehb();

			/*
			 * Setup kernel data structures to use software total,
			 * rather than read the per-VPE Config1 value. The values
			 * for "CPU 0" get copied to all the other CPUs as part
			 * of their initialization in smtc_cpu_setup().
			 */

			/* MIPS32 limits TLB indices to 64 */
			if (tlbsiz > 64)
				tlbsiz = 64;
			cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
			smtc_status |= SMTC_TLB_SHARED;
			local_flush_tlb_all();

			printk("TLB of %d entry pairs shared by %d VPEs\n",
				tlbsiz, vpes);
		} else {
			printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
		}
	}
}


/*
 * Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs.  Platform code needs
 * to ensure that each MIPS MT core invokes this routine on reset,
 * one at a time(!).
 *
 * This version of the build_cpu_map and prepare_cpus routines assumes
 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 * they will be spread across *all* available VPEs (to minimise the
 * loss of efficiency due to exception service serialization).
 * An improved version would pick up configuration information and
 * possibly leave some TCs/VPEs as "slave" processors.
 *
 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 * cpu_possible_map and the logical/physical mappings.
 */

int __init smtc_build_cpu_map(int start_cpu_slot)
{
	int i, ntcs;

	/*
	 * The CPU map isn't actually used for anything at this point,
	 * so it's not clear what else we should do apart from set
	 * everything up so that "logical" = "physical".
	 */
	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	for (i = start_cpu_slot; i < NR_CPUS && i < ntcs; i++) {
		cpu_set(i, cpu_possible_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Initialize map of CPUs with FPUs */
	cpus_clear(mt_fpu_cpumask);
#endif

	/* One of those TC's is the one booting, and not a secondary... */
	printk("%i available secondary CPU TC(s)\n", i - 1);

	return i;
}

/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondaries.
 *
 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 * as possible across the available VPEs.
 */

static void smtc_tc_setup(int vpe, int tc, int cpu)
{
	settc(tc);
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();
	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
			| TCSTATUS_A);
	/*
	 * TCContext gets an offset from the base of the IPIQ array
	 * to be used in low-level code to detect the presence of
	 * an active IPI queue
	 */
	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
	/* Bind tc to vpe */
	write_tc_c0_tcbind(vpe);
	/* In general, all TCs should have the same cpu_data indications */
	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
	if (cpu_data[0].cputype == CPU_34K ||
	    cpu_data[0].cputype == CPU_1004K)
		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
	cpu_data[cpu].vpe_id = vpe;
	cpu_data[cpu].tc_id = tc;
	/* Multi-core SMTC hasn't been tested, but be prepared */
	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}

/*
 * Tweak to get Count registers in as close a sync as possible.
 * Value seems good for 34K-class cores.
 */

#define CP0_SKEW 8

void smtc_prepare_cpus(int cpus)
{
	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
	unsigned long flags;
	unsigned long val;
	int nipi;
	struct smtc_ipi *pipi;

	/* disable interrupts so we can disable MT */
	local_irq_save(flags);
	/* disable MT so we can configure */
	dvpe();
	dmt();

	spin_lock_init(&freeIPIq.lock);

	/*
	 * We probably don't have as many VPEs as we do SMP "CPUs",
	 * but it's possible - and in any case we'll never use more!
	 */
	for (i = 0; i < NR_CPUS; i++) {
		IPIQ[i].head = IPIQ[i].tail = NULL;
		spin_lock_init(&IPIQ[i].lock);
		IPIQ[i].depth = 0;
	}

	/* cpu_data index starts at zero */
	cpu = 0;
	cpu_data[cpu].vpe_id = 0;
	cpu_data[cpu].tc_id = 0;
	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
	cpu++;

	/* Report on boot-time options */
	mips_mt_set_cpuoptions();
	if (vpelimit > 0)
		printk("Limit of %d VPEs set\n", vpelimit);
	if (tclimit > 0)
		printk("Limit of %d TCs set\n", tclimit);
	if (nostlb) {
		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
	}
	if (asidmask)
		printk("ASID mask value override to 0x%x\n", asidmask);

	/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	if (hang_trig)
		printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	/* Put MVPE's into 'configuration state' */
	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );

	val = read_c0_mvpconf0();
	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	if (vpelimit > 0 && nvpe > vpelimit)
		nvpe = vpelimit;
	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	if (ntc > NR_CPUS)
		ntc = NR_CPUS;
	if (tclimit > 0 && ntc > tclimit)
		ntc = tclimit;
	slop = ntc % nvpe;
	for (i = 0; i < nvpe; i++) {
		tcpervpe[i] = ntc / nvpe;
		if (slop) {
			if ((slop - i) > 0)
				tcpervpe[i]++;
		}
	}
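	/*
	 * (Even split with the remainder going to the lowest-numbered
	 * VPEs: e.g. ntc = 9, nvpe = 2 leaves slop = 1 and yields
	 * tcpervpe[] = {5, 4}.)
	 */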
	/* Handle command line override for VPE0 */
	if (vpe0limit > ntc)
		vpe0limit = ntc;
	if (vpe0limit > 0) {
		int slopslop;
		if (vpe0limit < tcpervpe[0]) {
			/* Reducing TC count - distribute to others */
			slop = tcpervpe[0] - vpe0limit;
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] += slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]++;
			}
		} else if (vpe0limit > tcpervpe[0]) {
			/* Increasing TC count - steal from others */
			slop = vpe0limit - tcpervpe[0];
			slopslop = slop % (nvpe - 1);
			tcpervpe[0] = vpe0limit;
			for (i = 1; i < nvpe; i++) {
				tcpervpe[i] -= slop / (nvpe - 1);
				if (slopslop && ((slopslop - (i - 1) > 0)))
					tcpervpe[i]--;
			}
		}
	}

	/* Set up shared TLB */
	smtc_configure_tlb();

	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
		/*
		 * Set the MVP bits.
		 */
		settc(tc);
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
		if (vpe != 0)
			printk(", ");
		printk("VPE %d: TC", vpe);
		for (i = 0; i < tcpervpe[vpe]; i++) {
			/*
			 * TC 0 is bound to VPE 0 at reset,
			 * and is presumably executing this
			 * code.  Leave it alone!
			 */
			if (tc != 0) {
				smtc_tc_setup(vpe, tc, cpu);
				cpu++;
			}
			printk(" %d", tc);
			tc++;
		}
		if (vpe != 0) {
			/*
			 * Clear any stale software interrupts from VPE's Cause
			 */
			write_vpe_c0_cause(0);

			/*
			 * Clear ERL/EXL of VPEs other than 0
			 * and set restricted interrupt enable/mask.
			 */
			write_vpe_c0_status((read_vpe_c0_status()
				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
				| ST0_IE));
			/*
			 * set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
			/* Clear any pending timer interrupt */
			write_vpe_c0_compare(0);
			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
			ehb();
		}
		/* enable multi-threading within VPE */
		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
		/* enable the VPE */
		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
	}

	/*
	 * Pull any physically present but unused TCs out of circulation.
	 */
	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
		cpu_clear(tc, cpu_possible_map);
		cpu_clear(tc, cpu_present_map);
		tc++;
	}

	/* release config state */
	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );

	printk("\n");

	/* Set up coprocessor affinity CPU mask(s) */

#ifdef CONFIG_MIPS_MT_FPAFF
	for (tc = 0; tc < ntc; tc++) {
		if (cpu_data[tc].options & MIPS_CPU_FPU)
			cpu_set(tc, mt_fpu_cpumask);
	}
#endif

	/* set up ipi interrupts... */

	/* If we have multiple VPEs running, set up the cross-VPE interrupt */

	setup_cross_vpe_interrupts(nvpe);

	/* Set up queue of free IPI "messages". */
	nipi = NR_CPUS * IPIBUF_PER_CPU;
	if (ipibuffers > 0)
		nipi = ipibuffers;

	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
	if (pipi == NULL)
		panic("kmalloc of IPI message buffers failed\n");
	else
		printk("IPI buffer pool of %d buffers\n", nipi);
	for (i = 0; i < nipi; i++) {
		smtc_ipi_nq(&freeIPIq, pipi);
		pipi++;
	}

	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
	emt(EMT_ENABLE);
	evpe(EVPE_ENABLE);
	local_irq_restore(flags);
	/* Initialize SMTC /proc statistics/diagnostics */
	init_smtc_stats();
}


/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 *
 */
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
	extern u32 kernelsp[NR_CPUS];
	unsigned long flags;
	int mtflags;

	LOCK_MT_PRA();
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		dvpe();
	}
	settc(cpu_data[cpu].tc_id);

	/* pc */
	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

	/* stack pointer */
	kernelsp[cpu] = __KSTK_TOS(idle);
	write_tc_gpr_sp(__KSTK_TOS(idle));

	/* global pointer */
	write_tc_gpr_gp((unsigned long)task_thread_info(idle));

	smtc_status |= SMTC_MTC_ACTIVE;
	write_tc_c0_tchalt(0);
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		evpe(EVPE_ENABLE);
	}
	UNLOCK_MT_PRA();
}

void smtc_init_secondary(void)
{
	local_irq_enable();
}

void smtc_smp_finish(void)
{
	int cpu = smp_processor_id();

	/*
	 * Lowest-numbered CPU per VPE starts a clock tick.
	 * Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and
	 * in ascending order across available VPEs.
	 */
	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
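	/*
	 * (mips_hpt_frequency/HZ Count ticks is one jiffy, so the first
	 * local timer interrupt is scheduled one jiffy from now.)
	 */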

	printk("TC %d going on-line as CPU %d\n",
		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}

void smtc_cpus_done(void)
{
}

/*
 * Support for SMTC-optimized driver IRQ registration
 */

/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */

int setup_irq_smtc(unsigned int irq, struct irqaction * new,
			unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	unsigned int vpe = current_cpu_data.vpe_id;

	vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * Support for IRQ affinity to TCs
 */

void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	/*
	 * If a "fast path" cache of quickly decodable affinity state
	 * is maintained, this is where it gets done, on a call up
	 * from the platform affinity code.
	 */
}

void smtc_forward_irq(unsigned int irq)
{
	int target;

	/*
	 * OK wise guy, now figure out how to get the IRQ
	 * to be serviced on an authorized "CPU".
	 *
	 * Ideally, to handle the situation where an IRQ has multiple
	 * eligible CPUS, we would maintain state per IRQ that would
	 * allow a fair distribution of service requests.  Since the
	 * expected use model is any-or-only-one, for simplicity
	 * and efficiency, we just pick the easiest one to find.
	 */

	target = cpumask_first(irq_desc[irq].affinity);

	/*
	 * We depend on the platform code to have correctly processed
	 * IRQ affinity change requests to ensure that the IRQ affinity
	 * mask has been purged of bits corresponding to nonexistent and
	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
	 * connected to the physical interrupt input for the interrupt
	 * in question.  Otherwise we have a nasty problem with interrupt
	 * mask management.  This is best handled in non-performance-critical
	 * platform IRQ affinity setting code, to minimize interrupt-time
	 * checks.
	 */

	/* If no one is eligible, service locally */
	if (target >= NR_CPUS) {
		do_IRQ_no_affinity(irq);
		return;
	}

	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */

/*
 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 * Within a VPE one TC can interrupt another by different approaches.
 * The easiest to get right would probably be to make all TCs except
 * the target IXMT and set a software interrupt, but an IXMT-based
 * scheme requires that a handler must run before a new IPI could
 * be sent, which would break the "broadcast" loops in MIPS MT.
 * A more gonzo approach within a VPE is to halt the TC, extract
 * its Restart, Status, and a couple of GPRs, and program the Restart
 * address to emulate an interrupt.
 *
 * Within a VPE, one can be confident that the target TC isn't in
 * a critical EXL state when halted, since the write to the Halt
 * register could not have issued on the writing thread if the
 * halting thread had EXL set. So k0 and k1 of the target TC
 * can be used by the injection code.  Across VPEs, one can't
 * be certain that the target TC isn't in a critical exception
 * state. So we try a two-step process of sending a software
 * interrupt to the target VPE, which either handles the event
 * itself (if it was the target) or injects the event within
 * the VPE.
 */

static void smtc_ipi_qdump(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
			IPIQ[i].depth);
	}
}

/*
 * The standard atomic.h primitives don't quite do what we want
 * here: We need an atomic add-and-return-previous-value (which
 * could be done with atomic_add_return and a decrement) and an
 * atomic set/zero-and-return-previous-value (which can't really
 * be done with the atomic.h primitives). And since this is
 * MIPS MT, we can assume that we have LL/SC.
 */
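/*
 * atomic_postincrement() returns the value *v held before the
 * increment: the LL/SC pair retries (beqz on the SC result) until
 * the store-conditional succeeds, making the read-modify-write
 * atomic with respect to other TCs.
 */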
static inline int atomic_postincrement(atomic_t *v)
{
	unsigned long result;

	unsigned long temp;

	__asm__ __volatile__(
	"1:	ll	%0, %2					\n"
	"	addu	%1, %0, 1				\n"
	"	sc	%1, %2					\n"
	"	beqz	%1, 1b					\n"
	__WEAK_LLSC_MB
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "m" (v->counter)
	: "memory");

	return result;
}

void smtc_send_ipi(int cpu, int type, unsigned int action)
{
	int tcstatus;
	struct smtc_ipi *pipi;
	unsigned long flags;
	int mtflags;
	unsigned long tcrestart;
	extern void r4k_wait_irqoff(void), __pastwait(void);

	if (cpu == smp_processor_id()) {
		printk("Cannot Send IPI to self!\n");
		return;
	}
	/* Set up a descriptor, to be delivered either promptly or queued */
	pipi = smtc_ipi_dq(&freeIPIq);
	if (pipi == NULL) {
		bust_spinlocks(1);
		mips_mt_regdump(dvpe());
		panic("IPI Msg. Buffers Depleted\n");
	}
	pipi->type = type;
	pipi->arg = (void *)action;
	pipi->dest = cpu;
	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
		/* If not on same VPE, enqueue and send cross-VPE interrupt */
		smtc_ipi_nq(&IPIQ[cpu], pipi);
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
		UNLOCK_CORE_PRA();
	} else {
		/*
		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
		 * since ASID shootdown on the other VPE may
		 * collide with this operation.
		 */
		LOCK_CORE_PRA();
		settc(cpu_data[cpu].tc_id);
		/* Halt the targeted TC */
		write_tc_c0_tchalt(TCHALT_H);
		mips_ihb();

		/*
		 * Inspect TCStatus - if IXMT is set, we have to queue
		 * a message. Otherwise, we set up the "interrupt"
		 * of the other TC
		 */
		tcstatus = read_tc_c0_tcstatus();

		if ((tcstatus & TCSTATUS_IXMT) != 0) {
			/*
			 * If we're in the irq-off version of the wait
			 * loop, we need to force exit from the wait and
			 * do a direct post of the IPI.
			 */
			if (cpu_wait == r4k_wait_irqoff) {
				tcrestart = read_tc_c0_tcrestart();
				if (tcrestart >= (unsigned long)r4k_wait_irqoff
				    && tcrestart < (unsigned long)__pastwait) {
					write_tc_c0_tcrestart(__pastwait);
					tcstatus &= ~TCSTATUS_IXMT;
					write_tc_c0_tcstatus(tcstatus);
					goto postdirect;
				}
			}
			/*
			 * Otherwise we queue the message for the target TC
			 * to pick up when he does a local_irq_restore()
			 */
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
			smtc_ipi_nq(&IPIQ[cpu], pipi);
		} else {
postdirect:
			post_direct_ipi(cpu, pipi);
			write_tc_c0_tchalt(0);
			UNLOCK_CORE_PRA();
		}
	}
}

/*
 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 */
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
	struct pt_regs *kstack;
	unsigned long tcstatus;
	unsigned long tcrestart;
	extern u32 kernelsp[NR_CPUS];
	extern void __smtc_ipi_vector(void);
//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);

	/* Extract Status, EPC from halted TC */
	tcstatus = read_tc_c0_tcstatus();
	tcrestart = read_tc_c0_tcrestart();
	/* If TCRestart indicates a WAIT instruction, advance the PC */
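	/*
	 * (0x42000020 is the MIPS32 WAIT opcode with a zero code field,
	 * and 0xfe00003f masks off the implementation-dependent code
	 * bits; the 0x80000000 test checks that TCRestart points into
	 * a kernel segment before the word is dereferenced.)
	 */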
	if ((tcrestart & 0x80000000)
	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
		tcrestart += 4;
	}
	/*
	 * Save on TC's future kernel stack
	 *
	 * CU bit of Status is indicator that TC was
	 * already running on a kernel stack...
	 */
	if (tcstatus & ST0_CU0) {
		/* Note that this "- 1" is pointer arithmetic */
		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
	} else {
		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
	}

	kstack->cp0_epc = (long)tcrestart;
	/* Save TCStatus */
	kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
	kstack->pad0[4] = (unsigned long)pipi;
	/* Pass address of function to be called likewise */
	kstack->pad0[5] = (unsigned long)&ipi_decode;
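	/*
	 * (These two pad words are assumed to be picked up by the
	 * low-level __smtc_ipi_vector assembly, which invokes the
	 * handler from pad0[5] with the descriptor from pad0[4] once
	 * the TC is restarted.)
	 */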
	/* Set interrupt exempt and kernel mode */
	tcstatus |= TCSTATUS_IXMT;
	tcstatus &= ~TCSTATUS_TKSU;
	write_tc_c0_tcstatus(tcstatus);
	ehb();
	/* Set TC Restart address to be SMTC IPI vector */
	write_tc_c0_tcrestart(__smtc_ipi_vector);
}

static void ipi_resched_interrupt(void)
{
	/* Return from interrupt should be enough to cause scheduler check */
}

static void ipi_call_interrupt(void)
{
	/* Invoke generic function invocation code in smp.c */
	smp_call_function_interrupt();
}

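/*
 * The SMTC_CLOCK_TICK IPI below invokes the per-CPU clockevent handler;
 * mips_clockevent_device itself is assumed to be defined by the MIPS
 * clockevent (cevt) code and is only referenced here.
 */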
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

void ipi_decode(struct smtc_ipi *pipi)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	void *arg_copy = pipi->arg;
	int type_copy = pipi->type;
	int irq = MIPS_CPU_IRQ_BASE + 1;

	smtc_ipi_nq(&freeIPIq, pipi);
	switch (type_copy) {
	case SMTC_CLOCK_TICK:
		irq_enter();
		kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
		irq_exit();
		break;

	case LINUX_SMP_IPI:
		switch ((int)arg_copy) {
		case SMP_RESCHEDULE_YOURSELF:
			ipi_resched_interrupt();
			break;
		case SMP_CALL_FUNCTION:
			ipi_call_interrupt();
			break;
		default:
			printk("Impossible SMTC IPI Argument 0x%x\n",
				(int)arg_copy);
			break;
		}
		break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	case IRQ_AFFINITY_IPI:
		/*
		 * Accept a "forwarded" interrupt that was initially
		 * taken by a TC who doesn't have affinity for the IRQ.
		 */
		do_IRQ_no_affinity((int)arg_copy);
		break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
	default:
		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
		break;
	}
}

/*
 * Similar to smtc_ipi_replay(), but invoked from context restore,
 * so it reuses the current exception frame rather than set up a
 * new one with self_ipi.
 */

void deferred_smtc_ipi(void)
{
	int cpu = smp_processor_id();

	/*
	 * Test is not atomic, but much faster than a dequeue,
	 * and the vast majority of invocations will have a null queue.
	 * If interrupts were disabled when this was called, any IPIs
	 * queued after our last test will be taken on the next
	 * irq_enable/restore; if they were enabled, any IPIs added
	 * after the last test will be taken directly.
	 */

	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It may be possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);

		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		if (pipi != NULL)
			ipi_decode(pipi);
		/*
		 * The use of the __raw_local restore isn't
		 * as obviously necessary here as in smtc_ipi_replay(),
		 * but it's more efficient, given that we're already
		 * running down the IPI queue.
		 */
		__raw_local_irq_restore(flags);
	}
}

/*
 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 * in some regards preferable to have external logic for "doorbell" hardware
 * interrupts.
 */

static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;

static irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
	int my_tc = cpu_data[smp_processor_id()].tc_id;
	int cpu;
	struct smtc_ipi *pipi;
	unsigned long tcstatus;
	int sent;
	unsigned long flags;
	unsigned int mtflags;
	unsigned int vpflags;

	/*
	 * So long as cross-VPE interrupts are done via
	 * MFTR/MTTR read-modify-writes of Cause, we need
	 * to stop other VPEs whenever the local VPE does
	 * anything similar.
	 */
	local_irq_save(flags);
	vpflags = dvpe();
	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
	irq_enable_hazard();
	evpe(vpflags);
	local_irq_restore(flags);

	/*
	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
	 * queued for TCs on this VPE other than the current one.
	 * Return-from-interrupt should cause us to drain the queue
	 * for the current TC, so we ought not to have to do it explicitly here.
	 */

	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id != my_vpe)
			continue;

		pipi = smtc_ipi_dq(&IPIQ[cpu]);
		if (pipi != NULL) {
			if (cpu_data[cpu].tc_id != my_tc) {
				sent = 0;
				LOCK_MT_PRA();
				settc(cpu_data[cpu].tc_id);
				write_tc_c0_tchalt(TCHALT_H);
				mips_ihb();
				tcstatus = read_tc_c0_tcstatus();
				if ((tcstatus & TCSTATUS_IXMT) == 0) {
					post_direct_ipi(cpu, pipi);
					sent = 1;
				}
				write_tc_c0_tchalt(0);
				UNLOCK_MT_PRA();
				if (!sent) {
					smtc_ipi_req(&IPIQ[cpu], pipi);
				}
			} else {
				/*
				 * ipi_decode() should be called
				 * with interrupts off
				 */
				local_irq_save(flags);
				ipi_decode(pipi);
				local_irq_restore(flags);
			}
		}
	}

	return IRQ_HANDLED;
}

static void ipi_irq_dispatch(void)
{
	do_IRQ(cpu_ipi_irq);
}

static struct irqaction irq_ipi = {
	.handler	= ipi_interrupt,
	.flags		= IRQF_DISABLED | IRQF_PERCPU,
	.name		= "SMTC_IPI"
};

static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
	if (nvpe < 1)
		return;

	if (!cpu_has_vint)
		panic("SMTC Kernel requires Vectored Interrupt support");

	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);

	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));

	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}

/*
 * SMTC-specific hacks invoked from elsewhere in the kernel.
 */

/*
 * smtc_ipi_replay is called from raw_local_irq_restore
 */

void smtc_ipi_replay(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * To the extent that we've ever turned interrupts off,
	 * we may have accumulated deferred IPIs.  This is subtle.
	 * We should be OK: if we pick up something and dispatch
	 * it here, that's great. If we see nothing, but concurrent
	 * with this operation, another TC sends us an IPI, IXMT
	 * is clear, and we'll handle it as a real pseudo-interrupt
	 * and not a pseudo-pseudo interrupt.  The important thing
	 * is to do the last check for queued message *after* the
	 * re-enabling of interrupts.
	 */
	while (IPIQ[cpu].head != NULL) {
		struct smtc_ipi_q *q = &IPIQ[cpu];
		struct smtc_ipi *pipi;
		unsigned long flags;

		/*
		 * It's just possible we'll come in with interrupts
		 * already enabled.
		 */
		local_irq_save(flags);

		spin_lock(&q->lock);
		pipi = __smtc_ipi_dq(q);
		spin_unlock(&q->lock);
		/*
		 * But use a raw restore here to avoid recursion.
		 */
		__raw_local_irq_restore(flags);

		if (pipi) {
			self_ipi(pipi);
			smtc_cpu_stats[cpu].selfipis++;
		}
	}
}

EXPORT_SYMBOL(smtc_ipi_replay);

void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
	int im;
	int flags;
	int mtflags;
	int bit;
	int vpe;
	int tc;
	int hook_ntcs;
	/*
	 * printk within DMT-protected regions can deadlock,
	 * so buffer diagnostic messages for later output.
	 */
	char *pdb_msg;
	char id_ho_db_msg[768];	/* worst-case use should be less than 700 */

	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
			int mvpconf0;
			/* Tedious stuff to just do once */
			mvpconf0 = read_c0_mvpconf0();
			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
			if (hook_ntcs > NR_CPUS)
				hook_ntcs = NR_CPUS;
			for (tc = 0; tc < hook_ntcs; tc++) {
				tcnoprog[tc] = 0;
				clock_hang_reported[tc] = 0;
			}
			for (vpe = 0; vpe < 2; vpe++)
				for (im = 0; im < 8; im++)
					imstuckcount[vpe][im] = 0;
			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
			atomic_set(&idle_hook_initialized, 1000);
		} else {
			/* Someone else is initializing in parallel - let 'em finish */
			while (atomic_read(&idle_hook_initialized) < 1000)
				;
		}
	}

	/* Have we stupidly left IXMT set somewhere? */
	if (read_c0_tcstatus() & 0x400) {
		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
		ehb();
		printk("Dangling IXMT in cpu_idle()\n");
	}

	/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
	local_irq_save(flags);
	mtflags = dmt();
	pdb_msg = &id_ho_db_msg[0];
	im = read_c0_status();
	vpe = current_cpu_data.vpe_id;
	for (bit = 0; bit < 8; bit++) {
		/*
		 * In current prototype, I/O interrupts
		 * are masked for VPE > 0
		 */
		if (vpemask[vpe][bit]) {
			if (!(im & (0x100 << bit)))
				imstuckcount[vpe][bit]++;
			else
				imstuckcount[vpe][bit] = 0;
			if (imstuckcount[vpe][bit] > IM_LIMIT) {
				set_c0_status(0x100 << bit);
				ehb();
				imstuckcount[vpe][bit] = 0;
				pdb_msg += sprintf(pdb_msg,
					"Dangling IM %d fixed for VPE %d\n", bit,
					vpe);
			}
		}
	}

	emt(mtflags);
	local_irq_restore(flags);
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

	smtc_ipi_replay();
}

void smtc_soft_dump(void)
{
	int i;

	printk("Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	}
	printk("Self-IPI invocations:\n");
	for (i = 0; i < NR_CPUS; i++) {
		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	}
	smtc_ipi_qdump();
	printk("%d Recoveries of \"stolen\" FPU\n",
	       atomic_read(&smtc_fpu_recoveries));
}


/*
 * TLB management routines special to SMTC
 */

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long flags, mtflags, tcstat, prevhalt, asid;
	int tlb, i;

	/*
	 * It would be nice to be able to use a spinlock here,
	 * but this is invoked from within TLB flush routines
	 * that protect themselves with DVPE, so if a lock is
	 * held by another TC, it'll never be freed.
	 *
	 * DVPE/DMT must not be done with interrupts enabled,
	 * so even though most callers will already have disabled
	 * them, let's be really careful...
	 */

	local_irq_save(flags);
	if (smtc_status & SMTC_TLB_SHARED) {
		mtflags = dvpe();
		tlb = 0;
	} else {
		mtflags = dmt();
		tlb = cpu_data[cpu].vpe_id;
	}
	asid = asid_cache(cpu);

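	/*
	 * Advance the candidate ASID until one is found that is not
	 * marked live for any TC sharing this TLB.  On a generation
	 * rollover, each sibling TC is briefly halted so that the ASID
	 * it is currently using can be sampled into smtc_live_asid[].
	 */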
	do {
		if (!((asid += ASID_INC) & ASID_MASK) ) {
			if (cpu_has_vtag_icache)
				flush_icache_all();
			/* Traverse all online CPUs (hack requires contiguous range) */
			for_each_online_cpu(i) {
				/*
				 * We don't need to worry about our own CPU, nor those of
				 * CPUs who don't share our TLB.
				 */
				if ((i != smp_processor_id()) &&
				    ((smtc_status & SMTC_TLB_SHARED) ||
				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
					settc(cpu_data[i].tc_id);
					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
					if (!prevhalt) {
						write_tc_c0_tchalt(TCHALT_H);
						mips_ihb();
					}
					tcstat = read_tc_c0_tcstatus();
					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
					if (!prevhalt)
						write_tc_c0_tchalt(0);
				}
			}
			if (!asid)		/* fix version if needed */
				asid = ASID_FIRST_VERSION;
			local_flush_tlb_all();	/* start new asid cycle */
		}
	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);

	/*
	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
	 */
	for_each_online_cpu(i) {
		if ((smtc_status & SMTC_TLB_SHARED) ||
		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
			cpu_context(i, mm) = asid_cache(i) = asid;
	}

	if (smtc_status & SMTC_TLB_SHARED)
		evpe(mtflags);
	else
		emt(mtflags);
	local_irq_restore(flags);
}

/*
 * Invoked from macros defined in mmu_context.h
 * which must already have disabled interrupts
 * and done a DVPE or DMT as appropriate.
 */

void smtc_flush_tlb_asid(unsigned long asid)
{
	int entry;
	unsigned long ehi;

	entry = read_c0_wired();

	/* Traverse all non-wired entries */
	while (entry < current_cpu_data.tlbsize) {
		write_c0_index(entry);
		ehb();
		tlb_read();
		ehb();
		ehi = read_c0_entryhi();
		if ((ehi & ASID_MASK) == asid) {
			/*
			 * Invalidate only entries with specified ASID,
			 * making sure all entries differ.
			 */
			write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		entry++;
	}
	write_c0_index(PARKED_INDEX);
	tlbw_use_hazard();
}

/*
 * Support for single-threading cache flush operations.
 */

static int halt_state_save[NR_CPUS];

/*
 * To really, really be sure that nothing is being done
 * by other TCs, halt them all.  This code assumes that
 * a DVPE has already been done, so while their Halted
 * state is theoretically architecturally unstable, in
 * practice, it's not going to change while we're looking
 * at it.
 */

void smtc_cflush_lockdown(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			halt_state_save[cpu] = read_tc_c0_tchalt();
			write_tc_c0_tchalt(TCHALT_H);
		}
	}
	mips_ihb();
}

/* It would be cheating to change the cpu_online states during a flush! */

void smtc_cflush_release(void)
{
	int cpu;

	/*
	 * Start with a hazard barrier to ensure
	 * that all CACHE ops have played through.
	 */
	mips_ihb();

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id()) {
			settc(cpu_data[cpu].tc_id);
			write_tc_c0_tchalt(halt_state_save[cpu]);
		}
	}
	mips_ihb();
}