#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);

#ifdef CONFIG_X86_32

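/*
 * With CONFIG_CC_STACKPROTECTOR the incoming task's canary has to be
 * copied into the per-CPU stack_canary slot that the compiler's
 * stack-protector code reads, before any protected function runs on the
 * new task's stack.  The helpers below splice that copy into the
 * switch_to() asm.
 */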
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							\
	"movl %P[task_canary](%[next]), %%ebx\n\t"			\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam						\
	, [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
#define __switch_canary_iparam						\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

/*
 * Saving eflags is important. It not only switches IOPL between tasks,
 * but also protects other tasks from NT leaking through sysenter etc.
 */
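/*
 * Note on the "last" argument: it receives __switch_to()'s return value
 * (in EAX), i.e. the task this CPU was actually running just before the
 * current task was resumed.  The local "prev"/"next" values captured by
 * the frozen stack frame may belong to an older switch, so callers that
 * need the previous task must use "last".
 */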
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP are not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
		       							\
		       __switch_canary_oparam				\
									\
		     /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
		       							\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

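/*
 * RAX, RSI and RDI are used as explicit operands of the switch_to() asm
 * below, and RSP/RBP are saved and restored by hand, so they are not
 * part of this clobber list.
 */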
#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

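/*
 * On 64-bit the compiler's stack-protector code reads the canary from a
 * fixed offset into the per-CPU area (irq_stack_union.stack_canary), so
 * the incoming task's canary is copied there during the switch.
 */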
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary							  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"				  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam						  \
	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
#define __switch_canary_iparam						  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

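/*
 * Unlike the 32-bit version, only RSP is switched explicitly: every task
 * that was switched out sleeps inside this asm, so resuming at the common
 * thread_return label (where __switch_to() returns on the new stack) is
 * always correct.  A freshly forked task has TIF_FORK set and is diverted
 * to ret_from_fork instead.
 */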
/* Save and restore flags to clear and handle a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     ".globl thread_return\n"					  \
	     "thread_return:\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz   ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
	       [current_task] "m" (per_cpu_var(current_task))		  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
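/*
 * If the selector load faults, the exception table entry redirects the
 * fault to the fixup code, which loads the null selector instead and
 * resumes after the original mov.
 */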
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0) : "memory")


/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

/*
 * x86_32 user gs accessors.
 */
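/*
 * With CONFIG_X86_32_LAZY_GS the user's %gs is not saved into pt_regs on
 * every kernel entry; it stays in the register and is only saved/loaded
 * lazily (via thread.gs) at context switch, so the accessors touch the
 * register itself.  Otherwise the user %gs lives in pt_regs.
 */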
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

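/*
 * Return the size in bytes of the segment selected by "segment"
 * (lsl yields the limit, i.e. size - 1).
 */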
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}

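/* Clear CR0.TS so that FPU/SSE instructions stop faulting with #NM. */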
static inline void native_clts(void)
{
	asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
	 * exists, so it will never fault. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

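/*
 * With CONFIG_PARAVIRT these operations go through the paravirt ops
 * tables; otherwise they map directly to the native_* helpers above.
 */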
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#define load_gs_index   native_load_gs_index
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

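/* Set CR0.TS so the next FPU/SSE instruction traps (lazy FPU switching). */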
#define stts() write_cr0(read_cr0() | X86_CR0_TS)

#endif /* __KERNEL__ */

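/* Flush the cache line containing *__p from all levels of the cache hierarchy. */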
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
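/*
 * CPUs without SSE/SSE2 lack the fence instructions; there a locked add
 * to the top of the stack acts as a full barrier, and alternative()
 * patches in the real fence when the feature bit is set.
 */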
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() 	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

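/*
 * X86_PPRO_FENCE covers Pentium Pro errata where loads can be reordered
 * beyond what the architecture promises, and X86_OOSTORE covers non-Intel
 * parts with out-of-order stores; without those options, plain compiler
 * barriers are enough for SMP read/write ordering.
 */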
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() 	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there was one.)
 */
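/*
 * Each alternative() below patches its NOP into the fence only when the
 * corresponding CPU feature bit is set; a given CPU advertises whichever
 * fence it needs to serialize RDTSC.
 */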
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}

#endif /* _ASM_X86_SYSTEM_H */