| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 1 | #ifndef __ASM_X86_PROCESSOR_H | 
 | 2 | #define __ASM_X86_PROCESSOR_H | 
 | 3 |  | 
| Glauber de Oliveira Costa | 053de04 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 4 | #include <asm/processor-flags.h> | 
 | 5 |  | 
| Ingo Molnar | 58f6f6e | 2008-01-30 13:32:54 +0100 | [diff] [blame] | 6 | /* migration helpers, for KVM - will be removed in 2.6.25: */ | 
 | 7 | #include <asm/vm86.h> | 
 | 8 | #define Xgt_desc_struct	desc_ptr | 
 | 9 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 10 | /* Forward declaration, a strange C thing */ | 
 | 11 | struct task_struct; | 
 | 12 | struct mm_struct; | 
 | 13 |  | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 14 | #include <asm/vm86.h> | 
 | 15 | #include <asm/math_emu.h> | 
 | 16 | #include <asm/segment.h> | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 17 | #include <asm/types.h> | 
 | 18 | #include <asm/sigcontext.h> | 
 | 19 | #include <asm/current.h> | 
 | 20 | #include <asm/cpufeature.h> | 
 | 21 | #include <asm/system.h> | 
| Glauber de Oliveira Costa | c72dcf8 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 22 | #include <asm/page.h> | 
| Glauber de Oliveira Costa | ca241c7 | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 23 | #include <asm/percpu.h> | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 24 | #include <asm/msr.h> | 
 | 25 | #include <asm/desc_defs.h> | 
| Andi Kleen | bd61643 | 2008-01-30 13:32:38 +0100 | [diff] [blame] | 26 | #include <asm/nops.h> | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 27 | #include <linux/personality.h> | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 28 | #include <linux/cpumask.h> | 
 | 29 | #include <linux/cache.h> | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 30 | #include <linux/threads.h> | 
 | 31 | #include <linux/init.h> | 
| Glauber de Oliveira Costa | c72dcf8 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 32 |  | 
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	/* Load the address of local label "1", i.e. the next instruction. */
	asm volatile("mov $1f,%0\n1:":"=r" (pc));
	return pc;
}
 | 43 |  | 
/*
 * Minimum alignment for task_struct / mm_struct allocations.
 * On vSMP the larger internode cacheline size is used — presumably to
 * avoid internode cache-line sharing; confirm against vSMP docs.
 */
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
 | 51 |  | 
/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor (X86_VENDOR_* below) */
	__u8	x86_model;
	__u8	x86_mask;
#ifdef CONFIG_X86_32
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char	hard_math;
	char	rfu;
	char	fdiv_bug;
	char	f00f_bug;
	char	coma_bug;
	char	pad0;
#else
	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
	int     x86_tlbsize;
	__u8    x86_virt_bits, x86_phys_bits;
	/* cpuid returned core id bits */
	__u8    x86_coreid_bits;
	/* Max extended CPUID function supported */
	__u32   extended_cpuid_level;
#endif
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];	/* feature-flag bitmap */
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
				    call  */
	int 	x86_cache_alignment;	/* In bytes */
	int	x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	u16 x86_max_cores;		/* cpuid returned max cores value */
	u16 apicid;
	u16 x86_clflush_size;
#ifdef CONFIG_SMP
	u16 booted_cores;		/* number of cores as seen by OS */
	u16 phys_proc_id; 		/* Physical processor id. */
	u16 cpu_core_id;  		/* Core id */
	u16 cpu_index;			/* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));	/* cacheline-align each per-CPU copy */
 | 103 |  | 
 | 104 | #define X86_VENDOR_INTEL 0 | 
 | 105 | #define X86_VENDOR_CYRIX 1 | 
 | 106 | #define X86_VENDOR_AMD 2 | 
 | 107 | #define X86_VENDOR_UMC 3 | 
 | 108 | #define X86_VENDOR_NEXGEN 4 | 
 | 109 | #define X86_VENDOR_CENTAUR 5 | 
 | 110 | #define X86_VENDOR_TRANSMETA 7 | 
 | 111 | #define X86_VENDOR_NSC 8 | 
 | 112 | #define X86_VENDOR_NUM 9 | 
 | 113 | #define X86_VENDOR_UNKNOWN 0xff | 
 | 114 |  | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 115 | /* | 
 | 116 |  * capabilities of CPUs | 
 | 117 |  */ | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 118 | extern struct cpuinfo_x86 boot_cpu_data; | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 119 | extern struct cpuinfo_x86 new_cpu_data; | 
 | 120 | extern struct tss_struct doublefault_tss; | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 121 |  | 
 | 122 | #ifdef CONFIG_SMP | 
 | 123 | DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); | 
 | 124 | #define cpu_data(cpu)		per_cpu(cpu_info, cpu) | 
 | 125 | #define current_cpu_data	cpu_data(smp_processor_id()) | 
 | 126 | #else | 
 | 127 | #define cpu_data(cpu)		boot_cpu_data | 
 | 128 | #define current_cpu_data	boot_cpu_data | 
 | 129 | #endif | 
 | 130 |  | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 131 | void cpu_detect(struct cpuinfo_x86 *c); | 
 | 132 |  | 
 | 133 | extern void identify_cpu(struct cpuinfo_x86 *); | 
 | 134 | extern void identify_boot_cpu(void); | 
 | 135 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | 
| Glauber de Oliveira Costa | 5300db8 | 2008-01-30 13:31:33 +0100 | [diff] [blame] | 136 | extern void print_cpu_info(struct cpuinfo_x86 *); | 
 | 137 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | 
 | 138 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 
 | 139 | extern unsigned short num_cache_leaves; | 
 | 140 |  | 
/* Hyper-Threading topology detection; a no-op stub when HT cannot exist. */
#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
 | 146 |  | 
/*
 * Execute the CPUID instruction.  All four registers are outputs;
 * *eax (the leaf) and *ecx (the subleaf) are also inputs, as the
 * "0"/"2" matching constraints below show.
 */
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
					 unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
 | 158 |  | 
| Glauber de Oliveira Costa | c72dcf8 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 159 | static inline void load_cr3(pgd_t *pgdir) | 
 | 160 | { | 
 | 161 | 	write_cr3(__pa(pgdir)); | 
 | 162 | } | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 163 |  | 
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short	back_link, __blh;
	unsigned long	sp0;		/* stack pointer for ring 0 */
	unsigned short	ss0, __ss0h;
	unsigned long	sp1;
	unsigned short	ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
	unsigned long	sp2;
	unsigned short	ss2, __ss2h;
	unsigned long	__cr3;
	unsigned long	ip;
	unsigned long	flags;
	unsigned long	ax, cx, dx, bx;
	unsigned long	sp, bp, si, di;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;	/* offset of the I/O bitmap in the TSS */
} __attribute__((packed));		/* layout is fixed by the CPU; never reorder */
#else
/* 64-bit hardware TSS: only stack pointers, ISTs and the I/O bitmap base. */
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;			/* stack pointers for rings 0-2 */
	u64 sp1;
	u64 sp2;
	u64 reserved2;
	u64 ist[7];			/* interrupt stack table pointers */
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif
 | 202 |  | 
/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

/* Software view of the TSS: the hardware part plus kernel bookkeeping. */
struct tss_struct {
	struct x86_hw_tss x86_tss;	/* hardware-defined layout, see above */

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

/* One TSS per CPU. */
DECLARE_PER_CPU(struct tss_struct, init_tss);
 | 239 |  | 
/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long ist[7];	/* one entry per IST slot (matches x86_hw_tss.ist) */
};
 | 244 |  | 
#define	MXCSR_DEFAULT		0x1f80	/* SSE MXCSR power-on default */

/* Legacy i387 FPU state in FSAVE format. */
struct i387_fsave_struct {
	u32	cwd;		/* control word */
	u32	swd;		/* status word */
	u32	twd;		/* tag word */
	u32	fip;		/* instruction pointer offset */
	u32	fcs;		/* instruction pointer selector */
	u32	foo;		/* operand pointer offset */
	u32	fos;		/* operand pointer selector */
	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	u32	status;		/* software status information */
};
 | 258 |  | 
/* FPU/SSE state in FXSAVE format. */
struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;		/* last opcode */
	union {
		struct {	/* 64-bit (u64) pointer image */
			u64	rip;
			u64	rdp;
		};
		struct {	/* 32-bit (u32) pointer image */
			u32	fip;
			u32	fcs;
			u32	foo;
			u32	fos;
		};
	};
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__((aligned(16)));	/* FXSAVE/FXRSTOR need a 16-byte aligned area */
 | 282 |  | 
/* State kept by the software FPU emulator (math-emu). */
struct i387_soft_struct {
	u32	cwd;
	u32	swd;
	u32	twd;
	u32	fip;
	u32	fcs;
	u32	foo;
	u32	fos;
	u32	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	u8	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;	/* NOTE(review): emulator-private state — confirm in math-emu */
	u32	entry_eip;
};
 | 296 |  | 
/*
 * Per-thread FPU state; presumably only one member is valid at a time,
 * depending on CPU capabilities (fsave vs fxsave vs emulation).
 */
union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct 	soft;
};
 | 302 |  | 
 | 303 | #ifdef CONFIG_X86_32 | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 304 | /* | 
 | 305 |  * the following now lives in the per cpu area: | 
 | 306 |  * extern	int cpu_llc_id[NR_CPUS]; | 
 | 307 |  */ | 
 | 308 | DECLARE_PER_CPU(u8, cpu_llc_id); | 
| Roland McGrath | 99f8ecd | 2008-01-30 13:31:48 +0100 | [diff] [blame] | 309 | #else | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 310 | DECLARE_PER_CPU(struct orig_ist, orig_ist); | 
| Thomas Gleixner | 96a388d | 2007-10-11 11:20:03 +0200 | [diff] [blame] | 311 | #endif | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 312 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 313 | extern void print_cpu_info(struct cpuinfo_x86 *); | 
 | 314 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | 
 | 315 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 
 | 316 | extern unsigned short num_cache_leaves; | 
 | 317 |  | 
| Glauber de Oliveira Costa | cb38d37 | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 318 | struct thread_struct { | 
 | 319 | /* cached TLS descriptors. */ | 
 | 320 | 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 
 | 321 | 	unsigned long	sp0; | 
 | 322 | 	unsigned long	sp; | 
 | 323 | #ifdef CONFIG_X86_32 | 
 | 324 | 	unsigned long	sysenter_cs; | 
 | 325 | #else | 
 | 326 | 	unsigned long 	usersp;	/* Copy from PDA */ | 
 | 327 | 	unsigned short	es, ds, fsindex, gsindex; | 
 | 328 | #endif | 
 | 329 | 	unsigned long	ip; | 
 | 330 | 	unsigned long	fs; | 
 | 331 | 	unsigned long	gs; | 
 | 332 | /* Hardware debugging registers */ | 
 | 333 | 	unsigned long	debugreg0; | 
 | 334 | 	unsigned long	debugreg1; | 
 | 335 | 	unsigned long	debugreg2; | 
 | 336 | 	unsigned long	debugreg3; | 
 | 337 | 	unsigned long	debugreg6; | 
 | 338 | 	unsigned long	debugreg7; | 
 | 339 | /* fault info */ | 
 | 340 | 	unsigned long	cr2, trap_no, error_code; | 
 | 341 | /* floating point info */ | 
 | 342 | 	union i387_union	i387 __attribute__((aligned(16)));; | 
 | 343 | #ifdef CONFIG_X86_32 | 
 | 344 | /* virtual 86 mode info */ | 
 | 345 | 	struct vm86_struct __user *vm86_info; | 
 | 346 | 	unsigned long		screen_bitmap; | 
 | 347 | 	unsigned long		v86flags, v86mask, saved_sp0; | 
 | 348 | 	unsigned int		saved_fs, saved_gs; | 
 | 349 | #endif | 
 | 350 | /* IO permissions */ | 
 | 351 | 	unsigned long	*io_bitmap_ptr; | 
 | 352 | 	unsigned long	iopl; | 
 | 353 | /* max allowed port in the bitmap, in bytes: */ | 
 | 354 | 	unsigned io_bitmap_max; | 
 | 355 | /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set.  */ | 
 | 356 | 	unsigned long	debugctlmsr; | 
 | 357 | /* Debug Store - if not 0 points to a DS Save Area configuration; | 
 | 358 |  *               goes into MSR_IA32_DS_AREA */ | 
 | 359 | 	unsigned long	ds_area_msr; | 
 | 360 | }; | 
 | 361 |  | 
/*
 * Read hardware debug register %dbN.  Only DB0-DB3, DB6 and DB7 exist;
 * any other register number is a kernel bug.
 */
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0; 	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}
 | 384 |  | 
/*
 * Write @value into hardware debug register %dbN.  Mirrors
 * native_get_debugreg(): only 0-3, 6 and 7 are valid.
 */
static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("mov %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("mov %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("mov %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("mov %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("mov %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}
 | 410 |  | 
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;
	/* read EFLAGS, clear the IOPL field, OR in @mask, write EFLAGS back */
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
	/* no-op on 64-bit */
}
 | 428 |  | 
/*
 * Install @thread's kernel stack pointer into the TSS, and on 32-bit
 * refresh the cached SYSENTER code-segment MSR when it went stale.
 */
static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 441 |  | 
/* Execute SWAPGS (swaps GS base with MSR_KERNEL_GS_BASE); no-op on 32-bit. */
static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
 | 448 |  | 
| Glauber de Oliveira Costa | 7818a1e | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 449 | #ifdef CONFIG_PARAVIRT | 
 | 450 | #include <asm/paravirt.h> | 
 | 451 | #else | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 452 | #define __cpuid native_cpuid | 
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 453 | #define paravirt_enabled() 0 | 
 | 454 |  | 
 | 455 | /* | 
 | 456 |  * These special macros can be used to get or set a debugging register | 
 | 457 |  */ | 
 | 458 | #define get_debugreg(var, register)				\ | 
 | 459 | 	(var) = native_get_debugreg(register) | 
 | 460 | #define set_debugreg(value, register)				\ | 
 | 461 | 	native_set_debugreg(register, value) | 
 | 462 |  | 
| Glauber de Oliveira Costa | 7818a1e | 2008-01-30 13:31:31 +0100 | [diff] [blame] | 463 | static inline void load_sp0(struct tss_struct *tss, | 
 | 464 | 			    struct thread_struct *thread) | 
 | 465 | { | 
 | 466 | 	native_load_sp0(tss, thread); | 
 | 467 | } | 
 | 468 |  | 
| Glauber de Oliveira Costa | 62d7d7e | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 469 | #define set_iopl_mask native_set_iopl_mask | 
| Glauber de Oliveira Costa | e801f86 | 2008-01-30 13:32:08 +0100 | [diff] [blame] | 470 | #define SWAPGS	swapgs | 
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 471 | #endif /* CONFIG_PARAVIRT */ | 
 | 472 |  | 
 | 473 | /* | 
 | 474 |  * Save the cr4 feature set we're using (ie | 
 | 475 |  * Pentium 4MB enable and PPro Global page | 
 | 476 |  * enable), so that any CPU's that boot up | 
 | 477 |  * after us can get the correct flags. | 
 | 478 |  */ | 
 | 479 | extern unsigned long mmu_cr4_features; | 
 | 480 |  | 
 | 481 | static inline void set_in_cr4(unsigned long mask) | 
 | 482 | { | 
 | 483 | 	unsigned cr4; | 
 | 484 | 	mmu_cr4_features |= mask; | 
 | 485 | 	cr4 = read_cr4(); | 
 | 486 | 	cr4 |= mask; | 
 | 487 | 	write_cr4(cr4); | 
 | 488 | } | 
 | 489 |  | 
 | 490 | static inline void clear_in_cr4(unsigned long mask) | 
 | 491 | { | 
 | 492 | 	unsigned cr4; | 
 | 493 | 	mmu_cr4_features &= ~mask; | 
 | 494 | 	cr4 = read_cr4(); | 
 | 495 | 	cr4 &= ~mask; | 
 | 496 | 	write_cr4(cr4); | 
 | 497 | } | 
 | 498 |  | 
/* Header of an Intel microcode update image. */
struct microcode_header {
	unsigned int hdrver;	/* header format version */
	unsigned int rev;	/* microcode revision */
	unsigned int date;
	unsigned int sig;	/* CPU signature this update applies to */
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;	/* processor flags */
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};
| Glauber de Oliveira Costa | 1b46cbe | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 511 |  | 
/* A microcode update: header followed by the variable-length payload. */
struct microcode {
	struct microcode_header hdr;
	/* NOTE(review): [0] is a GNU zero-length array; C99 flexible array
	 * member "bits[]" is the standard equivalent — candidate cleanup. */
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;
 | 519 |  | 
/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

/* Table of additional (signature, pf) pairs a single update may match. */
struct extended_sigtable {
	unsigned int count;	/* number of entries in sigs[] */
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];	/* NOTE(review): GNU zero-length array, see struct microcode */
};
 | 533 |  | 
/* Opaque address-space limit cookie; presumably consumed by
 * set_fs()/get_fs() in uaccess code — confirm against asm/uaccess.h. */
typedef struct {
	unsigned long seg;
} mm_segment_t;
 | 537 |  | 
 | 538 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 539 | /* | 
 | 540 |  * create a kernel thread without removing it from tasklists | 
 | 541 |  */ | 
 | 542 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 
 | 543 |  | 
 | 544 | /* Free all resources held by a thread. */ | 
 | 545 | extern void release_thread(struct task_struct *); | 
 | 546 |  | 
 | 547 | /* Prepare to copy thread state - unlazy all lazy status */ | 
 | 548 | extern void prepare_to_copy(struct task_struct *tsk); | 
 | 549 |  | 
 | 550 | unsigned long get_wchan(struct task_struct *p); | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 551 |  | 
 | 552 | /* | 
 | 553 |  * Generic CPUID function | 
 | 554 |  * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | 
 | 555 |  * resulting in stale register contents being returned. | 
 | 556 |  */ | 
 | 557 | static inline void cpuid(unsigned int op, | 
 | 558 | 			 unsigned int *eax, unsigned int *ebx, | 
 | 559 | 			 unsigned int *ecx, unsigned int *edx) | 
 | 560 | { | 
 | 561 | 	*eax = op; | 
 | 562 | 	*ecx = 0; | 
 | 563 | 	__cpuid(eax, ebx, ecx, edx); | 
 | 564 | } | 
 | 565 |  | 
 | 566 | /* Some CPUID calls want 'count' to be placed in ecx */ | 
 | 567 | static inline void cpuid_count(unsigned int op, int count, | 
 | 568 | 			       unsigned int *eax, unsigned int *ebx, | 
 | 569 | 			       unsigned int *ecx, unsigned int *edx) | 
 | 570 | { | 
 | 571 | 	*eax = op; | 
 | 572 | 	*ecx = count; | 
 | 573 | 	__cpuid(eax, ebx, ecx, edx); | 
 | 574 | } | 
 | 575 |  | 
 | 576 | /* | 
 | 577 |  * CPUID functions returning a single datum | 
 | 578 |  */ | 
 | 579 | static inline unsigned int cpuid_eax(unsigned int op) | 
 | 580 | { | 
 | 581 | 	unsigned int eax, ebx, ecx, edx; | 
 | 582 |  | 
 | 583 | 	cpuid(op, &eax, &ebx, &ecx, &edx); | 
 | 584 | 	return eax; | 
 | 585 | } | 
 | 586 | static inline unsigned int cpuid_ebx(unsigned int op) | 
 | 587 | { | 
 | 588 | 	unsigned int eax, ebx, ecx, edx; | 
 | 589 |  | 
 | 590 | 	cpuid(op, &eax, &ebx, &ecx, &edx); | 
 | 591 | 	return ebx; | 
 | 592 | } | 
 | 593 | static inline unsigned int cpuid_ecx(unsigned int op) | 
 | 594 | { | 
 | 595 | 	unsigned int eax, ebx, ecx, edx; | 
 | 596 |  | 
 | 597 | 	cpuid(op, &eax, &ebx, &ecx, &edx); | 
 | 598 | 	return ecx; | 
 | 599 | } | 
 | 600 | static inline unsigned int cpuid_edx(unsigned int op) | 
 | 601 | { | 
 | 602 | 	unsigned int eax, ebx, ecx, edx; | 
 | 603 |  | 
 | 604 | 	cpuid(op, &eax, &ebx, &ecx, &edx); | 
 | 605 | 	return edx; | 
 | 606 | } | 
 | 607 |  | 
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	/* memory clobber keeps the compiler from caching values across the spin hint */
	__asm__ __volatile__("rep;nop": : :"memory");
}
 | 613 |  | 
/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	/* CPUID is a serializing instruction; leaf 1, results discarded */
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
					  : "ebx", "ecx", "edx", "memory");
}
 | 621 |  | 
 | 622 | #define cpu_relax()   rep_nop() | 
 | 623 |  | 
/* Arm MONITOR on the address in @eax; @ecx/@edx are hints/extensions. */
static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	/* raw opcode bytes — presumably for assemblers lacking MONITOR support */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}
 | 632 |  | 
/* Execute MWAIT with the given hint registers (pairs with __monitor()). */
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
 | 640 |  | 
/* Enable interrupts (STI) immediately before MWAIT, so a pending
 * interrupt can wake the CPU out of the wait. */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		"sti; .byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
 | 648 |  | 
 | 649 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 
 | 650 |  | 
 | 651 | extern int force_mwait; | 
 | 652 |  | 
 | 653 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | 
 | 654 |  | 
 | 655 | extern unsigned long boot_option_idle_override; | 
 | 656 |  | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 657 | extern void enable_sep_cpu(void); | 
 | 658 | extern int sysenter_setup(void); | 
 | 659 |  | 
 | 660 | /* Defined in head.S */ | 
 | 661 | extern struct desc_ptr early_gdt_descr; | 
 | 662 |  | 
 | 663 | extern void cpu_set_gdt(int); | 
 | 664 | extern void switch_to_new_gdt(void); | 
 | 665 | extern void cpu_init(void); | 
 | 666 | extern void init_gdt(int cpu); | 
 | 667 |  | 
 | 668 | /* from system description table in BIOS.  Mostly for MCA use, but | 
 | 669 |  * others may find it useful. */ | 
 | 670 | extern unsigned int machine_id; | 
 | 671 | extern unsigned int machine_submodel_id; | 
 | 672 | extern unsigned int BIOS_revision; | 
 | 673 | extern unsigned int mca_pentium_flag; | 
 | 674 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 675 | /* Boot loader type from the setup header */ | 
 | 676 | extern int bootloader_type; | 
| Glauber de Oliveira Costa | 1a53905 | 2008-01-30 13:31:39 +0100 | [diff] [blame] | 677 |  | 
 | 678 | extern char ignore_fpu_irq; | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 679 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | 
 | 680 |  | 
 | 681 | #define HAVE_ARCH_PICK_MMAP_LAYOUT 1 | 
 | 682 | #define ARCH_HAS_PREFETCHW | 
 | 683 | #define ARCH_HAS_SPINLOCK_PREFETCH | 
 | 684 |  | 
| Glauber de Oliveira Costa | ae2e15e | 2008-01-30 13:31:40 +0100 | [diff] [blame] | 685 | #ifdef CONFIG_X86_32 | 
 | 686 | #define BASE_PREFETCH	ASM_NOP4 | 
 | 687 | #define ARCH_HAS_PREFETCH | 
 | 688 | #else | 
 | 689 | #define BASE_PREFETCH	"prefetcht0 (%1)" | 
 | 690 | #endif | 
 | 691 |  | 
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth to care about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre XP Athlons currently
   That should be fixed. */
static inline void prefetch(const void *x)
{
	/* alternative_input patches in prefetchnta when the CPU has SSE (XMM);
	 * otherwise the BASE_PREFETCH fallback above is used */
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
 | 704 |  | 
/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	/* prefetchw only when the CPU advertises 3DNow!; BASE_PREFETCH otherwise */
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
 | 714 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 715 | #define spin_lock_prefetch(x)	prefetchw(x) | 
| Glauber de Oliveira Costa | 2f66dcc | 2008-01-30 13:31:57 +0100 | [diff] [blame] | 716 | #ifdef CONFIG_X86_32 | 
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/*
 * Initial thread_struct for the boot task: kernel stack pointer at the
 * end of init_stack, SYSENTER cs preset, and %fs set to the per-cpu
 * segment (32-bit).
 */
#define INIT_THREAD  {							\
	.sp0 = sizeof(init_stack) + (long)&init_stack,			\
	.vm86_info = NULL,						\
	.sysenter_cs = __KERNEL_CS,					\
	.io_bitmap_ptr = NULL,						\
	.fs = __KERNEL_PERCPU,						\
}
 | 729 |  | 
 | 730 | /* | 
 | 731 |  * Note that the .io_bitmap member must be extra-big. This is because | 
 | 732 |  * the CPU will access an additional byte beyond the end of the IO | 
 | 733 |  * permission bitmap. The extra byte must be all 1 bits, and must | 
 | 734 |  * be within the limit. | 
 | 735 |  */ | 
 | 736 | #define INIT_TSS  {							\ | 
 | 737 | 	.x86_tss = {							\ | 
 | 738 | 		.sp0		= sizeof(init_stack) + (long)&init_stack, \ | 
 | 739 | 		.ss0		= __KERNEL_DS,				\ | 
 | 740 | 		.ss1		= __KERNEL_CS,				\ | 
 | 741 | 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\ | 
 | 742 | 	 },								\ | 
 | 743 | 	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },		\ | 
 | 744 | } | 
 | 745 |  | 
 | 746 | #define start_thread(regs, new_eip, new_esp) do {		\ | 
 | 747 | 	__asm__("movl %0,%%gs": :"r" (0));			\ | 
 | 748 | 	regs->fs = 0;						\ | 
 | 749 | 	set_fs(USER_DS);					\ | 
 | 750 | 	regs->ds = __USER_DS;					\ | 
 | 751 | 	regs->es = __USER_DS;					\ | 
 | 752 | 	regs->ss = __USER_DS;					\ | 
 | 753 | 	regs->cs = __USER_CS;					\ | 
 | 754 | 	regs->ip = new_eip;					\ | 
 | 755 | 	regs->sp = new_esp;					\ | 
 | 756 | } while (0) | 
 | 757 |  | 
 | 758 |  | 
 | 759 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | 
 | 760 |  | 
 | 761 | #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long)) | 
 | 762 | #define KSTK_TOP(info)                                                 \ | 
 | 763 | ({                                                                     \ | 
 | 764 |        unsigned long *__ptr = (unsigned long *)(info);                 \ | 
 | 765 |        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \ | 
 | 766 | }) | 
 | 767 |  | 
 | 768 | /* | 
 | 769 |  * The below -8 is to reserve 8 bytes on top of the ring0 stack. | 
 | 770 |  * This is necessary to guarantee that the entire "struct pt_regs" | 
 | 771 |  * is accessable even if the CPU haven't stored the SS/ESP registers | 
 | 772 |  * on the stack (interrupt gate does not save these registers | 
 | 773 |  * when switching to the same priv ring). | 
 | 774 |  * Therefore beware: accessing the ss/esp fields of the | 
 | 775 |  * "struct pt_regs" is possible, but they may contain the | 
 | 776 |  * completely wrong values. | 
 | 777 |  */ | 
 | 778 | #define task_pt_regs(task)                                             \ | 
 | 779 | ({                                                                     \ | 
 | 780 |        struct pt_regs *__regs__;                                       \ | 
 | 781 |        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ | 
 | 782 |        __regs__ - 1;                                                   \ | 
 | 783 | }) | 
 | 784 |  | 
 | 785 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | 
 | 786 |  | 
 | 787 | #else | 
 | 788 | /* | 
 | 789 |  * User space process size. 47bits minus one guard page. | 
 | 790 |  */ | 
 | 791 | #define TASK_SIZE64	(0x800000000000UL - 4096) | 
 | 792 |  | 
 | 793 | /* This decides where the kernel will search for a free chunk of vm | 
 | 794 |  * space during mmap's. | 
 | 795 |  */ | 
 | 796 | #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ | 
 | 797 | 			   0xc0000000 : 0xFFFFe000) | 
 | 798 |  | 
 | 799 | #define TASK_SIZE 		(test_thread_flag(TIF_IA32) ? \ | 
 | 800 | 				 IA32_PAGE_OFFSET : TASK_SIZE64) | 
 | 801 | #define TASK_SIZE_OF(child) 	((test_tsk_thread_flag(child, TIF_IA32)) ? \ | 
 | 802 | 				  IA32_PAGE_OFFSET : TASK_SIZE64) | 
 | 803 |  | 
 | 804 | #define INIT_THREAD  { \ | 
 | 805 | 	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | 
 | 806 | } | 
 | 807 |  | 
 | 808 | #define INIT_TSS  { \ | 
 | 809 | 	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ | 
 | 810 | } | 
 | 811 |  | 
 | 812 | #define start_thread(regs, new_rip, new_rsp) do { 			     \ | 
 | 813 | 	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));  \ | 
 | 814 | 	load_gs_index(0);						     \ | 
 | 815 | 	(regs)->ip = (new_rip);						     \ | 
 | 816 | 	(regs)->sp = (new_rsp);						     \ | 
 | 817 | 	write_pda(oldrsp, (new_rsp));					     \ | 
 | 818 | 	(regs)->cs = __USER_CS;						     \ | 
 | 819 | 	(regs)->ss = __USER_DS;						     \ | 
 | 820 | 	(regs)->flags = 0x200;						     \ | 
 | 821 | 	set_fs(USER_DS);						     \ | 
 | 822 | } while (0) | 
 | 823 |  | 
 | 824 | /* | 
 | 825 |  * Return saved PC of a blocked thread. | 
 | 826 |  * What is this good for? it will be always the scheduler or ret_from_fork. | 
 | 827 |  */ | 
 | 828 | #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) | 
 | 829 |  | 
 | 830 | #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) | 
 | 831 | #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ | 
 | 832 | #endif /* CONFIG_X86_64 */ | 
 | 833 |  | 
| Glauber de Oliveira Costa | 683e025 | 2008-01-30 13:31:27 +0100 | [diff] [blame] | 834 | /* This decides where the kernel will search for a free chunk of vm | 
 | 835 |  * space during mmap's. | 
 | 836 |  */ | 
 | 837 | #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3)) | 
 | 838 |  | 
 | 839 | #define KSTK_EIP(task) (task_pt_regs(task)->ip) | 
 | 840 |  | 
| Glauber de Oliveira Costa | c758ecf | 2008-01-30 13:31:03 +0100 | [diff] [blame] | 841 | #endif |