/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *	except for the occasional delivery of a 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware work arounds
 *	for this. We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there is
 *	essentially nothing of note from the C stepping onwards.
 */

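/*
 * stopping_cpu records which CPU initiated native_stop_other_cpus(), so the
 * NMI fallback handler can avoid stopping the initiating CPU itself;
 * smp_no_nmi_ipi is set by the "nonmi_ipi" boot parameter to suppress the
 * NMI fallback entirely.
 */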
static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}
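/*
 * Generic code normally reaches the routine above through the
 * smp_send_reschedule() wrapper in <asm/smp.h>, which dispatches via
 * smp_ops.smp_send_reschedule (see the smp_ops table at the end of this file).
 */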

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

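	/*
	 * If the request covers every online CPU except ourselves, and no CPU
	 * is still being brought up (online mask == callout mask), one
	 * shortcut broadcast is cheaper than addressing each CPU in the mask.
	 */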
	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}

static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on the stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */

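/*
 * smp_reboot_interrupt() below is the C-level handler for the REBOOT_VECTOR
 * IPI sent by native_stop_other_cpus(); the low-level entry stub that calls
 * it lives in the arch entry code.
 */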
asmlinkage void smp_reboot_interrupt(void)
{
	ack_APIC_irq();
	irq_enter();
	stop_this_cpu(NULL);
	irq_exit();
}

static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use our own vector here because smp_call_function()
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs.  Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code.  By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_VECTOR IPI is good enough */
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
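/*
 * Note that, unlike the call-function handlers below, this handler does not
 * bracket its work with irq_enter()/irq_exit(); scheduler_ipi() is written
 * to cope with being called from this bare interrupt context.
 */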

void smp_call_function_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}

void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}

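/*
 * Booting with "nonmi_ipi" on the kernel command line sets smp_no_nmi_ipi,
 * which makes native_stop_other_cpus() rely on the REBOOT_VECTOR IPI alone
 * and never fall back to the NMI.
 */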
static int __init nonmi_ipi_setup(char *str)
{
	smp_no_nmi_ipi = true;
	return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,
	.smp_send_reschedule	= native_smp_send_reschedule,

	.cpu_up			= native_cpu_up,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
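/*
 * smp_ops is the indirection layer that alternative implementations (for
 * example, paravirtualized guests such as Xen) can override with their own
 * routines; generic code reaches these hooks through the inline wrappers in
 * <asm/smp.h>.
 */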