/*
 * SMP stuff which is common to all sub-architectures.
 */
#include <linux/module.h>
#include <asm/smp.h>

#ifdef CONFIG_X86_32
DEFINE_PER_CPU(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

/*
 * Initialize the CPU's GDT.  This is either the boot CPU doing itself
 * (still using the master per-cpu area), or a CPU doing it for a
 * secondary which will soon come up.
 */
__cpuinit void init_gdt(int cpu)
{
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);

        pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
                        __per_cpu_offset[cpu], 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);

        gdt[GDT_ENTRY_PERCPU].s = 1;

        per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
        per_cpu(cpu_number, cpu) = cpu;
}
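
/*
 * Usage sketch, not part of the original file: roughly how a bring-up
 * path might call init_gdt() for a secondary CPU before starting it.
 * The descriptor packed above has the full 4GB limit (0xFFFFF pages,
 * 4K granularity, read/write data) but is based at __per_cpu_offset[cpu],
 * so segment-relative per-cpu accesses resolve to that CPU's area.
 * prepare_secondary_sketch() is a hypothetical helper, not a real
 * kernel function.
 */
#if 0
static void __cpuinit prepare_secondary_sketch(int cpu)
{
        /* Set up GDT_ENTRY_PERCPU, this_cpu_off and cpu_number first... */
        init_gdt(cpu);

        /* ...then architecture code would start the CPU at its trampoline. */
}
#endif /* illustrative sketch */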
#endif

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target CPUs call @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
{
        return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
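
/*
 * Usage sketch, not part of the original file: running a short callback
 * on every other online CPU and waiting for it to finish everywhere.
 * drain_local_state() and drain_all_cpus() are hypothetical names used
 * only for illustration.  The caller must have interrupts enabled and
 * must not be in hardware-interrupt or bottom-half context.
 */
#if 0
static void drain_local_state(void *info)
{
        /* Runs on each remote CPU in IPI context: keep it fast. */
}

static void drain_all_cpus(void)
{
        /* nonatomic is unused; wait=1 blocks until every CPU has run it. */
        smp_call_function(drain_local_state, NULL, 0, 1);
}
#endif /* illustrative sketch */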

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The target CPU.  If this is the calling CPU, @func is run
 *       directly with interrupts disabled.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Unused.
 * @wait: If true, wait until @func has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target CPU calls @func.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int nonatomic, int wait)
{
        /* prevent preemption and rescheduling onto another processor */
        int ret;
        int me = get_cpu();
        if (cpu == me) {
                local_irq_disable();
                func(info);
                local_irq_enable();
                put_cpu();
                return 0;
        }

        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);

        put_cpu();
        return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
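
/*
 * Usage sketch, not part of the original file: reading an MSR on one
 * specific CPU.  struct msr_args_sketch, read_remote_msr_sketch() and
 * read_msr_on_cpu_sketch() are hypothetical; a real caller would also
 * need <asm/msr.h> for rdmsrl().
 */
#if 0
struct msr_args_sketch {
        u32 msr;
        u64 value;
};

static void read_remote_msr_sketch(void *info)
{
        struct msr_args_sketch *args = info;

        rdmsrl(args->msr, args->value);
}

static u64 read_msr_on_cpu_sketch(int cpu, u32 msr)
{
        struct msr_args_sketch args = { .msr = msr, .value = 0 };

        /* wait=1: do not return until the target CPU has run the callback. */
        smp_call_function_single(cpu, read_remote_msr_sketch, &args, 0, 1);

        return args.value;
}
#endif /* illustrative sketch */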