/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *	2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *		Renamed modified smp_call_function to smp_call_function_on_cpu()
 *		Created a function that conforms to the old calling convention
 *		of smp_call_function().
 *
 *		This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */

extern void calibrate_delay(void);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
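
/*
 * A note on the console mailbox as this code uses it (a reading of the
 * routines above and below, not a restatement of the SRM console spec):
 * an outgoing message puts its length in the low 32 bits of
 * ipc_buffer[0] and its text from ipc_buffer[1] on, then rings the
 * target by setting its bit in hwrpb->rxrdy; wait_for_txrdy() polls for
 * that CPU's bit in hwrpb->txrdy to be clear before and after.  Replies
 * from a secondary's console arrive the other way, flagged by bits in
 * hwrpb->txrdy and picked up by recv_secondary_console_msg() below,
 * which takes the length from the high 32 bits of ipc_buffer[0].
 */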

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the meantime.
	   Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU.  */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum.  */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
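
/*
 * Summary of the boot handshake implied above (a reading of this file,
 * not an external spec): smp_boot_one_cpu() sets smp_secondary_alive to
 * -1 before asking the console to start the CPU, so the secondary spins
 * in wait_boot_cpu_to_stop().  Once the console has acked, the boot CPU
 * drops it to 0; the secondary then runs calibrate_delay(), stores its
 * cpu_data, and finally sets it to 1 in smp_callin() to tell the boot
 * CPU it is really up.
 */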

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_set(i, cpu_present_mask);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		cpu_set(boot_cpuid, cpu_present_mask);
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpu_possible_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_mask = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
	/*
	 * Mark the boot cpu (current cpu) as online.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
}

int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
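
/*
 * On the arithmetic above: bogosum is the summed loops_per_jiffy of the
 * online CPUs, and the two printk operands are its whole part and
 * hundredths when divided by 500000/HZ, i.e. roughly the total BogoMIPS
 * (loops_per_jiffy * HZ / 500000); the + 2500 is only a rounding fudge.
 */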


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING, regs);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
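
/*
 * On the ordering above (as written in this file): the first mb()
 * orders the sender's data against setting the per-cpu message bit, and
 * the second orders the bit against the wripir() that raises the
 * interprocessor interrupt.  The receiving side, handle_ipi() below,
 * pulls the whole bit mask with xchg(..., 0) and services each bit, so
 * several messages posted before the IPI lands are handled in one pass.
 */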

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
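
/*
 * Roughly, the LL/SC sequence above is the Alpha spelling of (a sketch,
 * not compilable as-is):
 *
 *	old = *(void **)lock;		// ldq_l: load-locked
 *	if (old == 0)
 *		*(void **)lock = data;	// stq_c: store-conditional,
 *					// retried at 1b if another CPU
 *					// intervened
 *
 * i.e. a compare-and-swap of NULL -> data that reports the old value.
 */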

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be
				   done is done by the interrupt return
				   path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has
				   been received, and execution is about to
				   begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone
				   unless wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is
				   done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
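
/*
 * A note on the bit walk above: ops & -ops isolates the lowest set bit
 * of the pending mask, ops &= ~which clears it, and __ffs() turns the
 * isolated bit back into its index (the enum ipi_message_type value),
 * so each queued message type is dispatched exactly once per pass.
 */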

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled.  */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log a
	 * message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
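
/*
 * A note on the single-user fast path above (and the identical ones in
 * flush_tlb_page() and flush_icache_user_range() below): when no other
 * thread can be running this mm, the cross-CPU call is skipped and
 * mm->context[cpu] is simply zeroed for the other CPUs.  The intended
 * effect, as far as this file is concerned, is that any such CPU that
 * later activates the mm sees an invalid context and must allocate a
 * fresh ASN, so its stale TLB entries are never used.
 */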

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}