/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>

extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "CPU%d:\t\tonline\n", i);
	}
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}

void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	/* multiplier and counter set by
	   smp_setup_percpu_timer()  */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	cpu_data(id).idle_volume = 1;

	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
						      16 * 1024);
	cpu_data(id).dcache_line_size =
		prom_getintdefault(cpu_node, "dcache-line-size", 32);
	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
						      16 * 1024);
	cpu_data(id).icache_line_size =
		prom_getintdefault(cpu_node, "icache-line-size", 32);
	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
						      4 * 1024 * 1024);
	cpu_data(id).ecache_line_size =
		prom_getintdefault(cpu_node, "ecache-line-size", 64);
	printk("CPU[%d]: Caches "
	       "D[sz(%d):line_sz(%d)] "
	       "I[sz(%d):line_sz(%d)] "
	       "E[sz(%d):line_sz(%d)]\n",
	       id,
	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}

static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;

void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	__flush_tlb_all();

	smp_setup_percpu_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

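/* Measure this slave's tick offset from the master.  Each iteration
 * handshakes through the go[] array: record our tick (t0), signal the
 * master, spin until it stores its tick value into go[SLAVE], then
 * record our tick again (t1).  The sample with the smallest round trip
 * (t1 - t0) wins, and the returned delta is the midpoint of that
 * interval minus the master's timestamp.
 */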
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

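/* Slave side of tick synchronization.  Run NUM_ROUNDS measurement
 * rounds, and after each non-zero delta nudge our tick register by
 * -delta plus a smoothed estimate of the adjustment latency.  A round
 * measuring delta == 0 means we have locked on to the master.
 */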
void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

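/* Master side of tick synchronization.  After the initial handshake,
 * serve NUM_ROUNDS*NUM_ITERS requests: each time the slave raises
 * go[MASTER] we answer by storing our current tick into go[SLAVE].
 */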
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret, cpu_node;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	cpu_find_by_mid(cpu, &cpu_node);
	prom_startcpu(cpu_node, entry, cookie);

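	/* The new cpu sets callin_flag from smp_callin() once it is
	 * alive; give it up to 5000000 * 100usec to get there.
	 */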
	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}
	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}

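/* Deliver one cross-call "mondo" to a single cpu: the three data words
 * are stored into the interrupt dispatch data registers (ASI_INTR_W,
 * offsets 0x40/0x50/0x60), then a store to the target's dispatch vector
 * kicks off delivery.  The dispatch status register is polled until the
 * busy bit (bit 0) clears; if the target NACKed us we retry after a
 * short delay.
 */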
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows us to send the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However we do take
 * advantage of the new pipelining feature (ie. dispatch to multiple
 * cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

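		/* Each dispatch slot owns two bits in the dispatch status
		 * register: the even bit is BUSY, the odd bit is NACK.
		 * Spin while any BUSY bit (mask 0x5555...) is still set.
		 */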
		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}

static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	/* XXX implement me */
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
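	/* The first mondo word packs the MMU context in the upper 32
	 * bits and the cross-call handler's kernel address in the low
	 * 32 bits.
	 */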
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus = cpus_weight(mask) - 1;
	long timeout;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	call_data = &data;

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
	       (long) num_online_cpus() - 1L,
	       (long) atomic_read(&data.finished));
	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

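/* Make every cpu that may be running this mm reload its TSB state;
 * tsb_context_switch() is only performed on a cpu where the mm is
 * currently active.
 */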
static void tsb_sync(void *info)
{
	struct mm_struct *mm = info;

	if (current->active_mm == mm)
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
 flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu)) {
		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, 0, 0, mask);
		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cheetah_xcall_deliver(data0, 0, 0, mask);
		else if (tlb_type == hypervisor)
			hypervisor_xcall_deliver(data0, 0, 0, mask);
	}
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the
 *    TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->mm_users == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

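/* Park all other cpus in smp_penguin_jailcell() until smp_release()
 * is called, so that nothing else runs in the kernel while we have
 * the PROM active.  Nested captures are handled via smp_capture_depth.
 */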
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter

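/* Per-cpu tick handler, driven by the level 14 tick-compare softint.
 * The do/while loop re-arms the compare register and repeats if the
 * new compare value has already been passed, so missed ticks get
 * processed rather than dropped.
 */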
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
				     "wrpr	%0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}

static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

/* Constrain the number of cpus to max_cpus.  */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (num_possible_cpus() > max_cpus) {
		int instance, mid;

		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	int cpu = hard_smp_processor_id();

	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = cpu;
	__local_per_cpu_offset = __per_cpu_offset(cpu);

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}

int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);

void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
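	/* Round the per-cpu area up to a power of two so that a cpu's
	 * area can be found with a simple shift (see __per_cpu_offset()).
	 */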
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}