#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam					\
	, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"  /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t"  /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
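
/*
 * Illustrative sketch (not part of this header): the scheduler's
 * context_switch() path is the expected caller, roughly
 *
 *	switch_to(prev, next, prev);
 *
 * where "last" aliases "prev" so the task that is eventually switched
 * back into can see which task ran before it.
 */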

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movq %P[task_canary](%%rsi),%%r8\n\t"			\
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					\
	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/* Save and restore flags to clear/handle a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz   ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (per_cpu_var(current_task))		  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0) : "memory")


/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
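
/*
 * Illustrative use only (hypothetical caller, not part of this header's
 * contract): stash a selector and reload it later; on a faulting reload
 * the fixup above falls back to the null selector:
 *
 *	unsigned int sel;
 *
 *	savesegment(fs, sel);
 *	...
 *	loadsegment(fs, sel);
 */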

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#define load_gs_index	native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)
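
/*
 * Sketch of the usual pairing (illustrative only, not a definitive recipe):
 * clear CR0.TS before touching FPU/SSE state, then set it again so further
 * use traps via #NM:
 *
 *	clts();
 *	... use FPU/SSE registers ...
 *	stts();
 */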

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);
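
/*
 * Illustrative pairing (hypothetical driver code, not defined here): keep
 * the idle loop from executing HLT while a timing-sensitive I/O operation
 * is in flight:
 *
 *	disable_hlt();
 *	... perform the critical I/O ...
 *	enable_hlt();
 */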

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

422#define smp_mb() mb()
423#ifdef CONFIG_X86_PPRO_FENCE
424# define smp_rmb() rmb()
425#else
426# define smp_rmb() barrier()
427#endif
428#ifdef CONFIG_X86_OOSTORE
429# define smp_wmb() wmb()
430#else
431# define smp_wmb() barrier()
432#endif
433#define smp_read_barrier_depends() read_barrier_depends()
Joe Perchesc5386c22008-03-23 01:03:39 -0700434#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
Glauber de Oliveira Costa833d8462008-01-30 13:31:08 +0100435#else
436#define smp_mb() barrier()
437#define smp_rmb() barrier()
438#define smp_wmb() barrier()
439#define smp_read_barrier_depends() do { } while (0)
440#define set_mb(var, value) do { var = value; barrier(); } while (0)
441#endif
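
/*
 * Illustrative use of set_mb() (a sketch, not taken from this file): publish
 * a value and make sure the store is visible before a later check, e.g. the
 * classic wait-loop pattern:
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 */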

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
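
/*
 * Illustrative sketch (hypothetical caller): fence both sides of a TSC read
 * so it can neither be hoisted out of nor sunk into the timed region:
 *
 *	rdtsc_barrier();
 *	t0 = get_cycles();
 *	rdtsc_barrier();
 */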

#endif /* _ASM_X86_SYSTEM_H */