/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

extern void calibrate_delay(void);

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

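/* call_lock serializes cross call initiators (smp_call_function_mask)
 * and is also taken in smp_callin() around the cpu_online_map update,
 * so a booting cpu cannot slip into the online map in the middle of a
 * pending cross call.
 */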
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __devinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	spin_lock(&call_lock);
	cpu_set(cpuid, cpu_online_map);
	spin_unlock(&call_lock);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
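
/* Handshake words shared by the master cpu and the cpu being brought up:
 * go[MASTER] is the flag the client raises (and the master clears) to
 * request the next sample, while go[SLAVE] carries the master's tick
 * value back to the client.  The SLAVE index is sized so the two words
 * sit a full SMP_CACHE_BYTES apart, keeping the flags off of the same
 * cache line.
 */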
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	extern int bigkernel;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;

	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = (bigkernel ? 2 : 1);

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	hdesc->maps[0].vaddr = tte_vaddr;
	hdesc->maps[0].tte   = tte_data;
	if (bigkernel) {
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
		hdesc->maps[1].vaddr = tte_vaddr;
		hdesc->maps[1].tte   = tte_data;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver, busy_mask;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (is_jbus) {
				busy_mask |= (0x1UL << (i * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

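/* Describes one cross call request: the function to run on the targets,
 * its argument, a counter the targets increment as they complete, and
 * whether the initiator waits for that counter to reach the number of
 * targets.  The initiator keeps this on its own stack and publishes it
 * through call_data below while holding call_lock.
 */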
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}
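
/* Example usage (an illustrative sketch only, not part of this file; the
 * callback and counter names below are hypothetical): run a function on
 * every other online cpu and wait for all of them to finish.  The cross
 * call machinery does not run the function on the initiating cpu (see the
 * NOTE in smp_cross_call_masked()), so the caller invokes it by hand.
 *
 *	static atomic_t probe_count;
 *
 *	static void probe_one_cpu(void *unused)
 *	{
 *		atomic_inc(&probe_count);
 *	}
 *
 *	...
 *	if (smp_call_function(probe_one_cpu, NULL, 0, 1) != 0)
 *		printk(KERN_WARNING "cross call failed\n");
 *	probe_one_cpu(NULL);	(run the local copy ourselves)
 */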

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
|  | 886 | * | 
|  | 887 | * The value of "current" is not changed atomically with | 
|  | 888 | * switch_mm().  But that's OK, we just need to check the | 
|  | 889 | * current cpu's trap block PGD physical address. | 
|  | 890 | */ | 
|  | 891 | if (tp->pgd_paddr == __pa(mm->pgd)) | 
| David S. Miller | bd40791 | 2006-01-31 18:31:38 -0800 | [diff] [blame] | 892 | tsb_context_switch(mm); | 
|  | 893 | } | 
|  | 894 |  | 
|  | 895 | void smp_tsb_sync(struct mm_struct *mm) | 
|  | 896 | { | 
|  | 897 | smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); | 
|  | 898 | } | 
|  | 899 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 900 | extern unsigned long xcall_flush_tlb_mm; | 
|  | 901 | extern unsigned long xcall_flush_tlb_pending; | 
|  | 902 | extern unsigned long xcall_flush_tlb_kernel_range; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | extern unsigned long xcall_report_regs; | 
|  | 904 | extern unsigned long xcall_receive_signal; | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 905 | extern unsigned long xcall_new_mmu_context_version; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 |  | 
|  | 907 | #ifdef DCACHE_ALIASING_POSSIBLE | 
|  | 908 | extern unsigned long xcall_flush_dcache_page_cheetah; | 
|  | 909 | #endif | 
|  | 910 | extern unsigned long xcall_flush_dcache_page_spitfire; | 
|  | 911 |  | 
|  | 912 | #ifdef CONFIG_DEBUG_DCFLUSH | 
|  | 913 | extern atomic_t dcpage_flushes; | 
|  | 914 | extern atomic_t dcpage_flushes_xcall; | 
|  | 915 | #endif | 
|  | 916 |  | 
| David S. Miller | d979f17 | 2007-10-27 00:13:04 -0700 | [diff] [blame] | 917 | static inline void __local_flush_dcache_page(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 918 | { | 
|  | 919 | #ifdef DCACHE_ALIASING_POSSIBLE | 
|  | 920 | __flush_dcache_page(page_address(page), | 
|  | 921 | ((tlb_type == spitfire) && | 
|  | 922 | page_mapping(page) != NULL)); | 
|  | 923 | #else | 
|  | 924 | if (page_mapping(page) != NULL && | 
|  | 925 | tlb_type == spitfire) | 
|  | 926 | __flush_icache_page(__pa(page_address(page))); | 
|  | 927 | #endif | 
|  | 928 | } | 
|  | 929 |  | 
|  | 930 | void smp_flush_dcache_page_impl(struct page *page, int cpu) | 
|  | 931 | { | 
|  | 932 | cpumask_t mask = cpumask_of_cpu(cpu); | 
| David S. Miller | a43fe0e | 2006-02-04 03:10:53 -0800 | [diff] [blame] | 933 | int this_cpu; | 
|  | 934 |  | 
|  | 935 | if (tlb_type == hypervisor) | 
|  | 936 | return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 937 |  | 
|  | 938 | #ifdef CONFIG_DEBUG_DCFLUSH | 
|  | 939 | atomic_inc(&dcpage_flushes); | 
|  | 940 | #endif | 
| David S. Miller | a43fe0e | 2006-02-04 03:10:53 -0800 | [diff] [blame] | 941 |  | 
|  | 942 | this_cpu = get_cpu(); | 
|  | 943 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 944 | if (cpu == this_cpu) { | 
|  | 945 | __local_flush_dcache_page(page); | 
|  | 946 | } else if (cpu_online(cpu)) { | 
|  | 947 | void *pg_addr = page_address(page); | 
|  | 948 | u64 data0; | 
|  | 949 |  | 
|  | 950 | if (tlb_type == spitfire) { | 
|  | 951 | data0 = | 
|  | 952 | ((u64)&xcall_flush_dcache_page_spitfire); | 
|  | 953 | if (page_mapping(page) != NULL) | 
|  | 954 | data0 |= ((u64)1 << 32); | 
|  | 955 | spitfire_xcall_deliver(data0, | 
|  | 956 | __pa(pg_addr), | 
|  | 957 | (u64) pg_addr, | 
|  | 958 | mask); | 
| David S. Miller | a43fe0e | 2006-02-04 03:10:53 -0800 | [diff] [blame] | 959 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | #ifdef DCACHE_ALIASING_POSSIBLE | 
|  | 961 | data0 = | 
|  | 962 | ((u64)&xcall_flush_dcache_page_cheetah); | 
|  | 963 | cheetah_xcall_deliver(data0, | 
|  | 964 | __pa(pg_addr), | 
|  | 965 | 0, mask); | 
|  | 966 | #endif | 
|  | 967 | } | 
|  | 968 | #ifdef CONFIG_DEBUG_DCFLUSH | 
|  | 969 | atomic_inc(&dcpage_flushes_xcall); | 
|  | 970 | #endif | 
|  | 971 | } | 
|  | 972 |  | 
|  | 973 | put_cpu(); | 
|  | 974 | } | 
|  | 975 |  | 
|  | 976 | void flush_dcache_page_all(struct mm_struct *mm, struct page *page) | 
|  | 977 | { | 
|  | 978 | void *pg_addr = page_address(page); | 
|  | 979 | cpumask_t mask = cpu_online_map; | 
|  | 980 | u64 data0; | 
| David S. Miller | a43fe0e | 2006-02-04 03:10:53 -0800 | [diff] [blame] | 981 | int this_cpu; | 
|  | 982 |  | 
|  | 983 | if (tlb_type == hypervisor) | 
|  | 984 | return; | 
|  | 985 |  | 
|  | 986 | this_cpu = get_cpu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 |  | 
|  | 988 | cpu_clear(this_cpu, mask); | 
|  | 989 |  | 
|  | 990 | #ifdef CONFIG_DEBUG_DCFLUSH | 
|  | 991 | atomic_inc(&dcpage_flushes); | 
|  | 992 | #endif | 
|  | 993 | if (cpus_empty(mask)) | 
|  | 994 | goto flush_self; | 
|  | 995 | if (tlb_type == spitfire) { | 
|  | 996 | data0 = ((u64)&xcall_flush_dcache_page_spitfire); | 
|  | 997 | if (page_mapping(page) != NULL) | 
|  | 998 | data0 |= ((u64)1 << 32); | 
|  | 999 | spitfire_xcall_deliver(data0, | 
|  | 1000 | __pa(pg_addr), | 
|  | 1001 | (u64) pg_addr, | 
|  | 1002 | mask); | 
| David S. Miller | a43fe0e | 2006-02-04 03:10:53 -0800 | [diff] [blame] | 1003 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1004 | #ifdef DCACHE_ALIASING_POSSIBLE | 
|  | 1005 | data0 = ((u64)&xcall_flush_dcache_page_cheetah); | 
|  | 1006 | cheetah_xcall_deliver(data0, | 
|  | 1007 | __pa(pg_addr), | 
|  | 1008 | 0, mask); | 
|  | 1009 | #endif | 
|  | 1010 | } | 
|  | 1011 | #ifdef CONFIG_DEBUG_DCFLUSH | 
|  | 1012 | atomic_inc(&dcpage_flushes_xcall); | 
|  | 1013 | #endif | 
|  | 1014 | flush_self: | 
|  | 1015 | __local_flush_dcache_page(page); | 
|  | 1016 |  | 
|  | 1017 | put_cpu(); | 
|  | 1018 | } | 
|  | 1019 |  | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1020 | static void __smp_receive_signal_mask(cpumask_t mask) | 
|  | 1021 | { | 
|  | 1022 | smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask); | 
|  | 1023 | } | 
|  | 1024 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 | void smp_receive_signal(int cpu) | 
|  | 1026 | { | 
|  | 1027 | cpumask_t mask = cpumask_of_cpu(cpu); | 
|  | 1028 |  | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1029 | if (cpu_online(cpu)) | 
|  | 1030 | __smp_receive_signal_mask(mask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | } | 
|  | 1032 |  | 
|  | 1033 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | 
|  | 1034 | { | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1035 | clear_softint(1 << irq); | 
|  | 1036 | } | 
|  | 1037 |  | 
|  | 1038 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | 
|  | 1039 | { | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1040 | struct mm_struct *mm; | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1041 | unsigned long flags; | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1042 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | clear_softint(1 << irq); | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1044 |  | 
|  | 1045 | /* See if we need to allocate a new TLB context because | 
|  | 1046 | * the version of the one we are using is now out of date. | 
|  | 1047 | */ | 
|  | 1048 | mm = current->active_mm; | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1049 | if (unlikely(!mm || (mm == &init_mm))) | 
|  | 1050 | return; | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1051 |  | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1052 | spin_lock_irqsave(&mm->context.lock, flags); | 
| David S. Miller | aac0aad | 2006-02-27 17:56:51 -0800 | [diff] [blame] | 1053 |  | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1054 | if (unlikely(!CTX_VALID(mm->context))) | 
|  | 1055 | get_new_mmu_context(mm); | 
| David S. Miller | aac0aad | 2006-02-27 17:56:51 -0800 | [diff] [blame] | 1056 |  | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1057 | spin_unlock_irqrestore(&mm->context.lock, flags); | 
| David S. Miller | aac0aad | 2006-02-27 17:56:51 -0800 | [diff] [blame] | 1058 |  | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1059 | load_secondary_context(mm); | 
|  | 1060 | __flush_tlb_mm(CTX_HWBITS(mm->context), | 
|  | 1061 | SECONDARY_CONTEXT); | 
| David S. Miller | a0663a7 | 2006-02-23 14:19:28 -0800 | [diff] [blame] | 1062 | } | 
|  | 1063 |  | 
|  | 1064 | void smp_new_mmu_context_version(void) | 
|  | 1065 | { | 
| David S. Miller | ee29074 | 2006-03-06 22:50:44 -0800 | [diff] [blame] | 1066 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | } | 
|  | 1068 |  | 
|  | 1069 | void smp_report_regs(void) | 
|  | 1070 | { | 
|  | 1071 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | 
|  | 1072 | } | 
|  | 1073 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1074 | /* We know that the window frames of the user have been flushed | 
|  | 1075 | * to the stack before we get here because all callers of us | 
|  | 1076 | * are flush_tlb_*() routines, and these run after flush_cache_*() | 
|  | 1077 | * which performs the flushw. | 
|  | 1078 | * | 
|  | 1079 | * The SMP TLB coherency scheme we use works as follows: | 
|  | 1080 | * | 
|  | 1081 | * 1) mm->cpu_vm_mask is a bit mask of which cpus an address | 
|  | 1082 | *    space has (potentially) executed on, this is the heuristic | 
|  | 1083 | *    we use to avoid doing cross calls. | 
|  | 1084 | * | 
|  | 1085 | *    Also, for flushing from kswapd and also for clones, we | 
|  | 1086 | *    use cpu_vm_mask as the list of cpus on which to run the TLB flush. | 
|  | 1087 | * | 
|  | 1088 | * 2) TLB context numbers are shared globally across all processors | 
|  | 1089 | *    in the system; this allows us to play several games to avoid | 
|  | 1090 | *    cross calls. | 
|  | 1091 | * | 
|  | 1092 | *    One invariant is that when a cpu switches to a process, and | 
|  | 1093 | *    that process's tsk->active_mm->cpu_vm_mask does not have the | 
|  | 1094 | *    current cpu's bit set, that tlb context is flushed locally. | 
|  | 1095 | * | 
|  | 1096 | *    If the address space is non-shared (i.e. mm->mm_users == 1) we avoid | 
|  | 1097 | *    cross calls when we want to flush the currently running process's | 
|  | 1098 | *    tlb state.  This is done by clearing all cpu bits except the current | 
|  | 1099 | *    processor's in current->active_mm->cpu_vm_mask and performing the | 
|  | 1100 | *    flush locally only.  This will force any subsequent cpus which run | 
|  | 1101 | *    this task to flush the context from the local tlb if the process | 
|  | 1102 | *    migrates to another cpu (again). | 
|  | 1103 | * | 
|  | 1104 | * 3) For shared address spaces (threads) and swapping we bite the | 
|  | 1105 | *    bullet for most cases and perform the cross call (but only to | 
|  | 1106 | *    the cpus listed in cpu_vm_mask). | 
|  | 1107 | * | 
|  | 1108 | *    The performance gain from "optimizing" away the cross call for threads is | 
|  | 1109 | *    questionable (in theory the big win for threads is the massive sharing of | 
|  | 1110 | *    address space state across processors). | 
|  | 1111 | */ | 
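/* The scheme above boils down to a single decision per flush request.
 * The sketch below is illustrative only; it is not compiled and the
 * helper name is made up.  The real implementations follow in
 * smp_flush_tlb_mm() and smp_flush_tlb_pending() below.
 *
 *	static void example_tlb_flush_decision(struct mm_struct *mm)
 *	{
 *		u32 ctx = CTX_HWBITS(mm->context);
 *		int cpu = get_cpu();
 *
 *		if (atomic_read(&mm->mm_users) == 1) {
 *			// private: shrink cpu_vm_mask to this cpu only
 *			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
 *		} else {
 *			// shared: cross call, but only to cpus in cpu_vm_mask
 *			smp_cross_call_masked(&xcall_flush_tlb_mm,
 *					      ctx, 0, 0, mm->cpu_vm_mask);
 *		}
 *
 *		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
 *		put_cpu();
 *	}
 */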
| David S. Miller | 62dbec7 | 2005-11-07 14:09:58 -0800 | [diff] [blame] | 1112 |  | 
|  | 1113 | /* This is currently only used by the hugetlb arch pre-fault | 
|  | 1114 | * hook on UltraSPARC-III+ and later when changing the pagesize | 
|  | 1115 | * bits of the context register for an address space. | 
|  | 1116 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | void smp_flush_tlb_mm(struct mm_struct *mm) | 
|  | 1118 | { | 
| David S. Miller | 62dbec7 | 2005-11-07 14:09:58 -0800 | [diff] [blame] | 1119 | u32 ctx = CTX_HWBITS(mm->context); | 
|  | 1120 | int cpu = get_cpu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 |  | 
| David S. Miller | 62dbec7 | 2005-11-07 14:09:58 -0800 | [diff] [blame] | 1122 | if (atomic_read(&mm->mm_users) == 1) { | 
|  | 1123 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | 
|  | 1124 | goto local_flush_and_out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | } | 
| David S. Miller | 62dbec7 | 2005-11-07 14:09:58 -0800 | [diff] [blame] | 1126 |  | 
|  | 1127 | smp_cross_call_masked(&xcall_flush_tlb_mm, | 
|  | 1128 | ctx, 0, 0, | 
|  | 1129 | mm->cpu_vm_mask); | 
|  | 1130 |  | 
|  | 1131 | local_flush_and_out: | 
|  | 1132 | __flush_tlb_mm(ctx, SECONDARY_CONTEXT); | 
|  | 1133 |  | 
|  | 1134 | put_cpu(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | } | 
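/* A hedged usage sketch for the hugetlb case described above.  Only the
 * smp_flush_tlb_mm() call is taken from this file; the surrounding step
 * is a hypothetical caller that has just rewritten the pagesize bits
 * held in mm->context:
 *
 *	// ... mm->context updated with the new pagesize bits ...
 *	smp_flush_tlb_mm(mm);
 */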
|  | 1136 |  | 
|  | 1137 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) | 
|  | 1138 | { | 
|  | 1139 | u32 ctx = CTX_HWBITS(mm->context); | 
|  | 1140 | int cpu = get_cpu(); | 
|  | 1141 |  | 
| Hugh Dickins | dedeb00 | 2005-11-07 14:09:01 -0800 | [diff] [blame] | 1142 | if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1143 | mm->cpu_vm_mask = cpumask_of_cpu(cpu); | 
| Hugh Dickins | dedeb00 | 2005-11-07 14:09:01 -0800 | [diff] [blame] | 1144 | else | 
|  | 1145 | smp_cross_call_masked(&xcall_flush_tlb_pending, | 
|  | 1146 | ctx, nr, (unsigned long) vaddrs, | 
|  | 1147 | mm->cpu_vm_mask); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | __flush_tlb_pending(ctx, nr, vaddrs); | 
|  | 1150 |  | 
|  | 1151 | put_cpu(); | 
|  | 1152 | } | 
|  | 1153 |  | 
|  | 1154 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) | 
|  | 1155 | { | 
|  | 1156 | start &= PAGE_MASK; | 
|  | 1157 | end    = PAGE_ALIGN(end); | 
|  | 1158 | if (start != end) { | 
|  | 1159 | smp_cross_call(&xcall_flush_tlb_kernel_range, | 
|  | 1160 | 0, start, end); | 
|  | 1161 |  | 
|  | 1162 | __flush_tlb_kernel_range(start, end); | 
|  | 1163 | } | 
|  | 1164 | } | 
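/* Illustrative usage sketch (the range below is hypothetical): a caller
 * that has changed a kernel mapping flushes the affected virtual range
 * on every cpu.  The routine page-aligns the range itself and does
 * nothing if the rounded range is empty.
 *
 *	smp_flush_tlb_kernel_range(start, start + len);
 */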
|  | 1165 |  | 
|  | 1166 | /* CPU capture. */ | 
|  | 1167 | /* #define CAPTURE_DEBUG */ | 
|  | 1168 | extern unsigned long xcall_capture; | 
|  | 1169 |  | 
|  | 1170 | static atomic_t smp_capture_depth = ATOMIC_INIT(0); | 
|  | 1171 | static atomic_t smp_capture_registry = ATOMIC_INIT(0); | 
|  | 1172 | static unsigned long penguins_are_doing_time; | 
|  | 1173 |  | 
|  | 1174 | void smp_capture(void) | 
|  | 1175 | { | 
|  | 1176 | int result = atomic_add_ret(1, &smp_capture_depth); | 
|  | 1177 |  | 
|  | 1178 | if (result == 1) { | 
|  | 1179 | int ncpus = num_online_cpus(); | 
|  | 1180 |  | 
|  | 1181 | #ifdef CAPTURE_DEBUG | 
|  | 1182 | printk("CPU[%d]: Sending penguins to jail...", | 
|  | 1183 | smp_processor_id()); | 
|  | 1184 | #endif | 
|  | 1185 | penguins_are_doing_time = 1; | 
| David S. Miller | 4f07118 | 2005-08-29 12:46:22 -0700 | [diff] [blame] | 1186 | membar_storestore_loadstore(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | atomic_inc(&smp_capture_registry); | 
|  | 1188 | smp_cross_call(&xcall_capture, 0, 0, 0); | 
|  | 1189 | while (atomic_read(&smp_capture_registry) != ncpus) | 
| David S. Miller | 4f07118 | 2005-08-29 12:46:22 -0700 | [diff] [blame] | 1190 | rmb(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | #ifdef CAPTURE_DEBUG | 
|  | 1192 | printk("done\n"); | 
|  | 1193 | #endif | 
|  | 1194 | } | 
|  | 1195 | } | 
|  | 1196 |  | 
|  | 1197 | void smp_release(void) | 
|  | 1198 | { | 
|  | 1199 | if (atomic_dec_and_test(&smp_capture_depth)) { | 
|  | 1200 | #ifdef CAPTURE_DEBUG | 
|  | 1201 | printk("CPU[%d]: Giving pardon to " | 
|  | 1202 | "imprisoned penguins\n", | 
|  | 1203 | smp_processor_id()); | 
|  | 1204 | #endif | 
|  | 1205 | penguins_are_doing_time = 0; | 
| David S. Miller | 4f07118 | 2005-08-29 12:46:22 -0700 | [diff] [blame] | 1206 | membar_storeload_storestore(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | atomic_dec(&smp_capture_registry); | 
|  | 1208 | } | 
|  | 1209 | } | 
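/* Typical pairing, sketched for illustration (the work in the middle is
 * a placeholder, not a real call): between capture and release every
 * other online cpu sits in smp_penguin_jailcell() with its register
 * windows flushed and the PROM world selected.
 *
 *	smp_capture();
 *	// ... work that needs all other cpus quiescent, e.g. PROM calls ...
 *	smp_release();
 */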
|  | 1210 |  | 
|  | 1211 | /* Imprisoned penguins run with %pil == 15, but with PSTATE_IE set, so they | 
|  | 1212 | * can service tlb flush xcalls... | 
|  | 1213 | */ | 
|  | 1214 | extern void prom_world(int); | 
| David S. Miller | 96c6e0d | 2006-01-31 18:32:29 -0800 | [diff] [blame] | 1215 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | void smp_penguin_jailcell(int irq, struct pt_regs *regs) | 
|  | 1217 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | clear_softint(1 << irq); | 
|  | 1219 |  | 
|  | 1220 | preempt_disable(); | 
|  | 1221 |  | 
|  | 1222 | __asm__ __volatile__("flushw"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | prom_world(1); | 
|  | 1224 | atomic_inc(&smp_capture_registry); | 
| David S. Miller | 4f07118 | 2005-08-29 12:46:22 -0700 | [diff] [blame] | 1225 | membar_storeload_storestore(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 | while (penguins_are_doing_time) | 
| David S. Miller | 4f07118 | 2005-08-29 12:46:22 -0700 | [diff] [blame] | 1227 | rmb(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | atomic_dec(&smp_capture_registry); | 
|  | 1229 | prom_world(0); | 
|  | 1230 |  | 
|  | 1231 | preempt_enable(); | 
|  | 1232 | } | 
|  | 1233 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | /* /proc/profile writes can call this, don't __init it please. */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | int setup_profiling_timer(unsigned int multiplier) | 
|  | 1236 | { | 
| David S. Miller | 777a447 | 2007-02-22 06:24:10 -0800 | [diff] [blame] | 1237 | return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | } | 
|  | 1239 |  | 
|  | 1240 | void __init smp_prepare_cpus(unsigned int max_cpus) | 
|  | 1241 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | void __devinit smp_prepare_boot_cpu(void) | 
|  | 1245 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1246 | } | 
|  | 1247 |  | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1248 | void __devinit smp_fill_in_sib_core_maps(void) | 
|  | 1249 | { | 
|  | 1250 | unsigned int i; | 
|  | 1251 |  | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1252 | for_each_present_cpu(i) { | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1253 | unsigned int j; | 
|  | 1254 |  | 
| David S. Miller | 39dd992 | 2007-07-15 01:29:24 -0700 | [diff] [blame] | 1255 | cpus_clear(cpu_core_map[i]); | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1256 | if (cpu_data(i).core_id == 0) { | 
| David S. Miller | f78eae2 | 2007-06-04 17:01:39 -0700 | [diff] [blame] | 1257 | cpu_set(i, cpu_core_map[i]); | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1258 | continue; | 
|  | 1259 | } | 
|  | 1260 |  | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1261 | for_each_present_cpu(j) { | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1262 | if (cpu_data(i).core_id == | 
|  | 1263 | cpu_data(j).core_id) | 
| David S. Miller | f78eae2 | 2007-06-04 17:01:39 -0700 | [diff] [blame] | 1264 | cpu_set(j, cpu_core_map[i]); | 
|  | 1265 | } | 
|  | 1266 | } | 
|  | 1267 |  | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1268 | for_each_present_cpu(i) { | 
| David S. Miller | f78eae2 | 2007-06-04 17:01:39 -0700 | [diff] [blame] | 1269 | unsigned int j; | 
|  | 1270 |  | 
| Mike Travis | d5a7430 | 2007-10-16 01:24:05 -0700 | [diff] [blame] | 1271 | cpus_clear(per_cpu(cpu_sibling_map, i)); | 
| David S. Miller | f78eae2 | 2007-06-04 17:01:39 -0700 | [diff] [blame] | 1272 | if (cpu_data(i).proc_id == -1) { | 
| Mike Travis | d5a7430 | 2007-10-16 01:24:05 -0700 | [diff] [blame] | 1273 | cpu_set(i, per_cpu(cpu_sibling_map, i)); | 
| David S. Miller | f78eae2 | 2007-06-04 17:01:39 -0700 | [diff] [blame] | 1274 | continue; | 
|  | 1275 | } | 
|  | 1276 |  | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1277 | for_each_present_cpu(j) { | 
| David S. Miller | f78eae2 | 2007-06-04 17:01:39 -0700 | [diff] [blame] | 1278 | if (cpu_data(i).proc_id == | 
|  | 1279 | cpu_data(j).proc_id) | 
| Mike Travis | d5a7430 | 2007-10-16 01:24:05 -0700 | [diff] [blame] | 1280 | cpu_set(j, per_cpu(cpu_sibling_map, i)); | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1281 | } | 
|  | 1282 | } | 
|  | 1283 | } | 
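/* Once filled in, the two maps are consumed like any other cpumask.  A
 * hedged sketch (the loop and printk below are illustrative, not taken
 * from the kernel):
 *
 *	int i, j;
 *
 *	for_each_present_cpu(i) {
 *		printk(KERN_INFO "cpu%d siblings:", i);
 *		for_each_cpu_mask(j, per_cpu(cpu_sibling_map, i))
 *			printk(" %d", j);
 *		printk("\n");
 *	}
 */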
|  | 1284 |  | 
| Gautham R Shenoy | b282b6f | 2007-01-10 23:15:34 -0800 | [diff] [blame] | 1285 | int __cpuinit __cpu_up(unsigned int cpu) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | { | 
|  | 1287 | int ret = smp_boot_one_cpu(cpu); | 
|  | 1288 |  | 
|  | 1289 | if (!ret) { | 
|  | 1290 | cpu_set(cpu, smp_commenced_mask); | 
|  | 1291 | while (!cpu_isset(cpu, cpu_online_map)) | 
|  | 1292 | mb(); | 
|  | 1293 | if (!cpu_isset(cpu, cpu_online_map)) { | 
|  | 1294 | ret = -ENODEV; | 
|  | 1295 | } else { | 
| David S. Miller | 02fead7 | 2006-02-11 23:22:47 -0800 | [diff] [blame] | 1296 | /* On SUN4V, writes to %tick and %stick are | 
|  | 1297 | * not allowed. | 
|  | 1298 | */ | 
|  | 1299 | if (tlb_type != hypervisor) | 
|  | 1300 | smp_synchronize_one_tick(cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | } | 
|  | 1302 | } | 
|  | 1303 | return ret; | 
|  | 1304 | } | 
|  | 1305 |  | 
| David S. Miller | 4f0234f | 2007-07-13 16:03:42 -0700 | [diff] [blame] | 1306 | #ifdef CONFIG_HOTPLUG_CPU | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1307 | void cpu_play_dead(void) | 
|  | 1308 | { | 
|  | 1309 | int cpu = smp_processor_id(); | 
|  | 1310 | unsigned long pstate; | 
|  | 1311 |  | 
|  | 1312 | idle_task_exit(); | 
|  | 1313 |  | 
|  | 1314 | if (tlb_type == hypervisor) { | 
|  | 1315 | struct trap_per_cpu *tb = &trap_block[cpu]; | 
|  | 1316 |  | 
|  | 1317 | sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, | 
|  | 1318 | tb->cpu_mondo_pa, 0); | 
|  | 1319 | sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, | 
|  | 1320 | tb->dev_mondo_pa, 0); | 
|  | 1321 | sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, | 
|  | 1322 | tb->resum_mondo_pa, 0); | 
|  | 1323 | sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, | 
|  | 1324 | tb->nonresum_mondo_pa, 0); | 
|  | 1325 | } | 
|  | 1326 |  | 
|  | 1327 | cpu_clear(cpu, smp_commenced_mask); | 
|  | 1328 | membar_safe("#Sync"); | 
|  | 1329 |  | 
|  | 1330 | local_irq_disable(); | 
|  | 1331 |  | 
|  | 1332 | __asm__ __volatile__( | 
|  | 1333 | "rdpr	%%pstate, %0\n\t" | 
|  | 1334 | "wrpr	%0, %1, %%pstate" | 
|  | 1335 | : "=r" (pstate) | 
|  | 1336 | : "i" (PSTATE_IE)); | 
|  | 1337 |  | 
|  | 1338 | while (1) | 
|  | 1339 | barrier(); | 
|  | 1340 | } | 
|  | 1341 |  | 
| David S. Miller | 4f0234f | 2007-07-13 16:03:42 -0700 | [diff] [blame] | 1342 | int __cpu_disable(void) | 
|  | 1343 | { | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1344 | int cpu = smp_processor_id(); | 
|  | 1345 | cpuinfo_sparc *c; | 
|  | 1346 | int i; | 
|  | 1347 |  | 
|  | 1348 | for_each_cpu_mask(i, cpu_core_map[cpu]) | 
|  | 1349 | cpu_clear(cpu, cpu_core_map[i]); | 
|  | 1350 | cpus_clear(cpu_core_map[cpu]); | 
|  | 1351 |  | 
| Mike Travis | d5a7430 | 2007-10-16 01:24:05 -0700 | [diff] [blame] | 1352 | for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) | 
|  | 1353 | cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); | 
|  | 1354 | cpus_clear(per_cpu(cpu_sibling_map, cpu)); | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1355 |  | 
|  | 1356 | c = &cpu_data(cpu); | 
|  | 1357 |  | 
|  | 1358 | c->core_id = 0; | 
|  | 1359 | c->proc_id = -1; | 
|  | 1360 |  | 
|  | 1361 | spin_lock(&call_lock); | 
|  | 1362 | cpu_clear(cpu, cpu_online_map); | 
|  | 1363 | spin_unlock(&call_lock); | 
|  | 1364 |  | 
|  | 1365 | smp_wmb(); | 
|  | 1366 |  | 
|  | 1367 | /* Make sure no interrupts point to this cpu.  */ | 
|  | 1368 | fixup_irqs(); | 
|  | 1369 |  | 
|  | 1370 | local_irq_enable(); | 
|  | 1371 | mdelay(1); | 
|  | 1372 | local_irq_disable(); | 
|  | 1373 |  | 
|  | 1374 | return 0; | 
| David S. Miller | 4f0234f | 2007-07-13 16:03:42 -0700 | [diff] [blame] | 1375 | } | 
|  | 1376 |  | 
|  | 1377 | void __cpu_die(unsigned int cpu) | 
|  | 1378 | { | 
| David S. Miller | e020440 | 2007-07-16 03:49:40 -0700 | [diff] [blame] | 1379 | int i; | 
|  | 1380 |  | 
|  | 1381 | for (i = 0; i < 100; i++) { | 
|  | 1382 | smp_rmb(); | 
|  | 1383 | if (!cpu_isset(cpu, smp_commenced_mask)) | 
|  | 1384 | break; | 
|  | 1385 | msleep(100); | 
|  | 1386 | } | 
|  | 1387 | if (cpu_isset(cpu, smp_commenced_mask)) { | 
|  | 1388 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | 
|  | 1389 | } else { | 
|  | 1390 | #if defined(CONFIG_SUN_LDOMS) | 
|  | 1391 | unsigned long hv_err; | 
|  | 1392 | int limit = 100; | 
|  | 1393 |  | 
|  | 1394 | do { | 
|  | 1395 | hv_err = sun4v_cpu_stop(cpu); | 
|  | 1396 | if (hv_err == HV_EOK) { | 
|  | 1397 | cpu_clear(cpu, cpu_present_map); | 
|  | 1398 | break; | 
|  | 1399 | } | 
|  | 1400 | } while (--limit > 0); | 
|  | 1401 | if (limit <= 0) { | 
|  | 1402 | printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n", | 
|  | 1403 | hv_err); | 
|  | 1404 | } | 
|  | 1405 | #endif | 
|  | 1406 | } | 
| David S. Miller | 4f0234f | 2007-07-13 16:03:42 -0700 | [diff] [blame] | 1407 | } | 
|  | 1408 | #endif | 
|  | 1409 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 | void __init smp_cpus_done(unsigned int max_cpus) | 
|  | 1411 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1412 | } | 
|  | 1413 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 | void smp_send_reschedule(int cpu) | 
|  | 1415 | { | 
| Nick Piggin | 64c7c8f | 2005-11-08 21:39:04 -0800 | [diff] [blame] | 1416 | smp_receive_signal(cpu); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 | } | 
|  | 1418 |  | 
|  | 1419 | /* This is a nop because we capture all other cpus | 
|  | 1420 | * anyway when making the PROM active. | 
|  | 1421 | */ | 
|  | 1422 | void smp_send_stop(void) | 
|  | 1423 | { | 
|  | 1424 | } | 
|  | 1425 |  | 
| David S. Miller | d369ddd | 2005-07-10 15:45:11 -0700 | [diff] [blame] | 1426 | unsigned long __per_cpu_base __read_mostly; | 
|  | 1427 | unsigned long __per_cpu_shift __read_mostly; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 |  | 
|  | 1429 | EXPORT_SYMBOL(__per_cpu_base); | 
|  | 1430 | EXPORT_SYMBOL(__per_cpu_shift); | 
|  | 1431 |  | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1432 | void __init real_setup_per_cpu_areas(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | { | 
|  | 1434 | unsigned long goal, size, i; | 
|  | 1435 | char *ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 |  | 
|  | 1437 | /* Copy section for each CPU (we discard the original) */ | 
| David S. Miller | 5a08900 | 2006-12-14 23:40:57 -0800 | [diff] [blame] | 1438 | goal = PERCPU_ENOUGH_ROOM; | 
|  | 1439 |  | 
| Jeremy Fitzhardinge | b6e3590 | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 1440 | __per_cpu_shift = PAGE_SHIFT; | 
|  | 1441 | for (size = PAGE_SIZE; size < goal; size <<= 1UL) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | __per_cpu_shift++; | 
|  | 1443 |  | 
| Jeremy Fitzhardinge | b6e3590 | 2007-05-02 19:27:12 +0200 | [diff] [blame] | 1444 | ptr = alloc_bootmem_pages(size * NR_CPUS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 |  | 
|  | 1446 | __per_cpu_base = ptr - __per_cpu_start; | 
|  | 1447 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1448 | for (i = 0; i < NR_CPUS; i++, ptr += size) | 
|  | 1449 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 
| David S. Miller | 951bc82 | 2006-05-31 01:24:02 -0700 | [diff] [blame] | 1450 |  | 
|  | 1451 | /* Setup %g5 for the boot cpu.  */ | 
|  | 1452 | __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1453 | } |
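/* Worked example of the sizing above (numbers purely illustrative): with
 * PAGE_SIZE = 8K (PAGE_SHIFT = 13) and a per-cpu section needing 40K,
 * the loop doubles size 8K -> 16K -> 32K -> 64K and bumps
 * __per_cpu_shift from 13 to 16.  Each cpu then gets a 64K copy, and
 * cpu N's copy sits at offset __per_cpu_base + (N << 16) from
 * __per_cpu_start, which is what __per_cpu_offset(N) evaluates to.
 */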