/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *      This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

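/* Give each CPU's IPI word its own cache line so that senders on other
   CPUs do not false-share (and bounce) the line this CPU reads in its
   IPI handler.  */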
/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
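/* Each message type is used as a bit index into ipi_data[cpu].bits, so
   several distinct requests can be pending for one CPU at once;
   handle_ipi() drains the whole word with an atomic xchg.  */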

/* Secondary boot-time handshake flag: -1 = hold the secondary, 0 = let
   it go, 1 = the secondary has come fully alive.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */

extern void calibrate_delay(void);


/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
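
/* Boot handshake protocol (see smp_boot_one_cpu() and smp_callin()):
   the boot CPU sets smp_secondary_alive to -1 before starting a
   secondary, then to 0 once the console has acked the START; the
   secondary spins in wait_boot_cpu_to_stop() until the flag drops to 0,
   runs calibrate_delay(), records its cpu_data, and finally sets the
   flag to 1 to tell the boot CPU it is alive.  */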

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we have written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
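
/* The SRM console is reached through a mailbox in the HWRPB: per-CPU
   bits in hwrpb->txrdy and hwrpb->rxrdy flag messages pending in each
   direction, and each CPU's ipc_buffer carries a length field followed
   by the message text.  The two routines below implement the kernel
   side of that exchange.  */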

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		      ((char*)hwrpb
		       + hwrpb->processor_offset
		       + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr ... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper".  */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
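			/* The 0x1cc mask tests the HWRPB per-CPU state
			   flags: presumably Processor Available (bit 2),
			   Processor Present (bit 3), and the PALcode
			   Valid / PALcode Memory Valid / PALcode Loaded
			   bits (6-8), so only CPUs that are physically
			   there and ready to run are counted.  (Bit names
			   are an interpretation, not from the original
			   source.)  */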
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;
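
	/* One BogoMIPS is 500000 loops per second, so the total is
	   bogosum * HZ / 500000; the +2500 term rounds the printed
	   value, and the second expression recovers the two decimal
	   places modulo 100.  */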
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

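/* Post the requested operation as a bit in each target's ipi_data word,
   then interrupt each target with wripir() -- a PALcode call that writes
   the interprocessor interrupt request register.  The mb() between the
   two phases ensures the bits are visible before the interrupt lands.  */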
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until it is free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");
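
	/* The sequence above is Alpha load-locked / store-conditional:
	   ldq_l reads the current pointer, bne bails to 2: if it is
	   already non-NULL, and stq_c stores our pointer only if nothing
	   else touched the location in between; beq retries from 1: when
	   the conditional store failed.  */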

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing.  */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access.  */
		do {
			unsigned long which;

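			/* "ops & -ops" isolates the lowest set bit and
			   __ffs() converts that mask to a bit number, so
			   each pass handles exactly one pending IPI type.  */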
			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has been
				   received, and execution is about to begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone unless
				   wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing.  */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

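	/* unstarted_count counts CPUs that have not yet picked up the call
	   data; unfinished_count counts CPUs that have not yet finished
	   running func, which matters only when wait is set.  */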
	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}

static void
ipi_imb(void *ignored)
{
	imb();
}
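
/* imb() executes the Alpha IMB PALcall, which flushes the local
   instruction cache.  Every CPU must flush its own icache, hence the
   cross-CPU broadcast below.  */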

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
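/* asn_lock is presumably set while a CPU is switching address-space
   numbers; the IPI handlers below must then avoid flushing the current
   context directly and fall back to flush_tlb_other().  (Interpretive
   note, not from the original source.)  */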

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
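		/* Single-user fast path: instead of interrupting every CPU,
		   zero the mm's per-CPU context so a fresh ASN is allocated
		   (and stale TLB entries thus die) the next time the mm is
		   activated on that CPU.  flush_tlb_page() and
		   flush_icache_user_range() below take the same shortcut.  */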
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}