/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * bitmasks of possible and online CPUs.
 * The possible bitmask indicates which CPUs could ever be brought up.
 * The online bitmask indicates which CPUs are up and running.
 */
cpumask_t cpu_possible_map;
cpumask_t cpu_online_map;

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};
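
/*
 * The per-CPU ipi_data above gives each CPU a private IPI mailbox:
 * senders set bits in the target CPU's "bits" word under that CPU's
 * lock, so senders aiming at different targets never contend on a
 * single global lock.
 */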

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};

struct smp_call_struct {
        void (*func)(void *info);
        void *info;
        int wait;
        cpumask_t pending;
        cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);
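
/*
 * Only one cross-call can be in flight at a time: smp_call_function_lock
 * serialises callers, and smp_call_function_data points at the current
 * caller's descriptor for the duration.  "pending" tracks CPUs that have
 * not yet picked the call up; "unfinished" tracks CPUs still running
 * func().
 */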

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        pgd_t *pgd;
        pmd_t *pmd;
        int ret;

        /*
         * Spawn a new process manually.  Grab a pointer to
         * its task struct so we can mess with it
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle)) {
                printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                return PTR_ERR(idle);
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        pmd = pmd_offset(pgd, PHYS_OFFSET);
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
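
        /*
         * The entry written above is a first-level "section" descriptor:
         * it identity-maps the 1MB of physical memory containing the
         * kernel, so the secondary can turn the MMU on while it is still
         * executing from physical addresses.
         */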

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        wmb();
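
        /*
         * The write barrier ensures the stack and pgdir values are
         * visible in memory before the secondary is kicked off below;
         * the secondary reads them very early, before it has any
         * normal synchronisation of its own.
         */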

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
        pgd_free(pgd);

        if (ret) {
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}
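
/*
 * Bring-up handshake, in order:
 *  1. __cpu_up() publishes the stack/pgdir in secondary_data and calls
 *     the platform's boot_secondary() to wake the core.
 *  2. The core enters secondary_start_kernel() below on the temporary
 *     page tables, switches to init_mm, and initialises itself.
 *  3. It marks itself in cpu_online_map, which releases the polling
 *     loop in __cpu_up().
 */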

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpu_set(cpu, mm->cpu_vm_mask);
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
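
        /*
         * cpu_switch_mm() above moved us off the temporary page tables
         * set up by __cpu_up(); the TLB flush discards any entries
         * cached from them, including the 1:1 boot mapping.
         */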

        cpu_init();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        local_irq_enable();
        local_fiq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.
         */
        cpu_set(cpu, cpu_online_map);

        /*
         * OK, it's off to the idle thread for us.
         */
        cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

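        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing the
         * summed loops_per_jiffy by 500000/HZ yields the integer part
         * and dividing by 5000/HZ yields hundredths (mod 100 below).
         */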
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        cpu_set(cpu, cpu_possible_map);
        cpu_set(cpu, cpu_present_map);
        cpu_set(cpu, cpu_online_map);
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu_mask(cpu, callmap) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(callmap);

        local_irq_restore(flags);
}
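
/*
 * Posting the message bit under the target's ipi_data lock before
 * raising the hardware interrupt via smp_cross_call() guarantees that
 * do_IPI() on the target will find the message when it drains "bits".
 */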

/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
                             int wait, cpumask_t callmap)
{
        struct smp_call_struct data;
        unsigned long timeout;
        int ret = 0;

        data.func = func;
        data.info = info;
        data.wait = wait;

        cpu_clear(smp_processor_id(), callmap);
        if (cpus_empty(callmap))
                goto out;

        data.pending = callmap;
        if (wait)
                data.unfinished = callmap;

        /*
         * Try to get the mutex on smp_call_function_data.
         */
        spin_lock(&smp_call_function_lock);
        smp_call_function_data = &data;

        send_ipi_message(callmap, IPI_CALL_FUNC);

        timeout = jiffies + HZ;
        while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
                barrier();

        /*
         * Did we time out?
         */
        if (!cpus_empty(data.pending)) {
                /*
                 * This may be causing our panic - report it.
                 */
                printk(KERN_CRIT
                       "CPU%u: smp_call_function timeout for %p(%p)\n"
                       "      callmap %lx pending %lx, %swait\n",
                       smp_processor_id(), func, info, callmap, data.pending,
                       wait ? "" : "no ");

                /*
                 * TRACE
                 */
                timeout = jiffies + (5 * HZ);
                while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
                        barrier();

                if (cpus_empty(data.pending))
                        printk(KERN_CRIT "     RESOLVED\n");
                else
                        printk(KERN_CRIT "     STILL STUCK\n");
        }

        /*
         * Whatever happened, we're done with the data, so release it.
         */
        smp_call_function_data = NULL;
        spin_unlock(&smp_call_function_lock);

        if (!cpus_empty(data.pending)) {
                ret = -ETIMEDOUT;
                goto out;
        }

        if (wait)
                while (!cpus_empty(data.unfinished))
                        barrier();
 out:
        return ret;
}

int smp_call_function(void (*func)(void *info), void *info, int retry,
                      int wait)
{
        return smp_call_function_on_cpu(func, info, retry, wait,
                                        cpu_online_map);
}
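
/*
 * Illustrative (hypothetical) usage, running a handler on every other
 * online CPU and waiting for completion:
 *
 *      static void drain_local_state(void *info)
 *      {
 *              ...
 *      }
 *
 *      smp_call_function(drain_local_state, NULL, 1, 1);
 *
 * The handler runs from do_IPI() in interrupt context on each target,
 * so it must not sleep.
 */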

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

static void ipi_timer(struct pt_regs *regs)
{
        int user = user_mode(regs);

        irq_enter();
        profile_tick(CPU_PROFILING, regs);
        update_process_times(user);
        irq_exit();
}

/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data.
 */
static void ipi_call_function(unsigned int cpu)
{
        struct smp_call_struct *data = smp_call_function_data;
        void (*func)(void *info) = data->func;
        void *info = data->info;
        int wait = data->wait;

        cpu_clear(cpu, data->pending);

        func(info);

        if (wait)
                cpu_clear(cpu, data->unfinished);
}
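
/*
 * The local copies above matter: as soon as this CPU drops out of
 * "pending", a non-waiting caller may return and reuse the stack frame
 * holding the smp_call_struct, so func/info/wait must be read first.
 */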

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        cpu_clear(cpu, cpu_online_map);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
void do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;

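                        /*
                         * msgs & -msgs isolates the lowest set bit
                         * (two's-complement identity), and ffz(~x)
                         * converts that one-hot value back into its
                         * bit index, i.e. the ipi_msg_type number.
                         */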
                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);

                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer(regs);
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                ipi_call_function(cpu);
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static int
on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
                 cpumask_t mask)
{
        int ret = 0;

        preempt_disable();

        ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
        if (cpu_isset(smp_processor_id(), mask))
                func(info);

        preempt_enable();

        return ret;
}
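
/*
 * Unlike smp_call_function_on_cpu(), this also runs func() directly on
 * the calling CPU (with preemption disabled) when it is in the mask,
 * which is what the TLB flush wrappers below rely on.
 */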

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
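
/*
 * The wrappers below broadcast a local flush: kernel-space flushes go
 * to every CPU via on_each_cpu(), while user-space flushes are limited
 * to the CPUs recorded in the mm's cpu_vm_mask.
 */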

void flush_tlb_all(void)
{
        on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t mask = mm->cpu_vm_mask;

        on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
        struct tlb_args ta;

        ta.ta_vma = vma;
        ta.ta_start = uaddr;

        on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        struct tlb_args ta;

        ta.ta_start = kaddr;

        on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        cpumask_t mask = vma->vm_mm->cpu_vm_mask;
        struct tlb_args ta;

        ta.ta_vma = vma;
        ta.ta_start = start;
        ta.ta_end = end;

        on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct tlb_args ta;

        ta.ta_start = start;
        ta.ta_end = end;

        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
}