/*
 * include/asm-x86_64/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/mmsegment.h>
#include <asm/percpu.h>
#include <linux/personality.h>
#include <linux/cpumask.h>

#define TF_MASK		0x00000100
#define IF_MASK		0x00000200
#define IOPL_MASK	0x00003000
#define NT_MASK		0x00004000
#define VM_MASK		0x00020000
#define AC_MASK		0x00040000
#define VIF_MASK	0x00080000	/* virtual interrupt flag */
#define VIP_MASK	0x00100000	/* virtual interrupt pending */
#define ID_MASK		0x00200000

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB */
	int	x86_clflush_size;
	int	x86_cache_alignment;
	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
	__u8	x86_virt_bits, x86_phys_bits;
	__u8	x86_max_cores;	/* cpuid returned max cores value */
	__u32	x86_power;
	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	__u8	apicid;
#ifdef CONFIG_SMP
	__u8	booted_cores;	/* number of cores as seen by OS */
	__u8	phys_proc_id;	/* Physical Processor id. */
	__u8	cpu_core_id;	/* Core id. */
#endif
} ____cacheline_aligned;

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NUM 8
#define X86_VENDOR_UNKNOWN 0xff

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
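
/*
 * Illustrative example (not part of the original header): callers usually
 * consult current_cpu_data to branch on what identify_cpu() discovered at
 * boot, e.g. the vendor or cache geometry.  A sketch only:
 *
 *	if (current_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 *	    current_cpu_data.x86_cache_size >= 1024)
 *		printk("AMD CPU with at least 1MB of cache\n");
 */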

extern char ignore_irq13;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
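
/*
 * Illustrative example (not part of the original header): these masks are
 * tested against the saved eflags word in struct pt_regs, for instance to
 * see whether interrupts were enabled or single-stepping was active when a
 * trap was taken.  A sketch only:
 *
 *	int irqs_on     = !!(regs->eflags & X86_EFLAGS_IF);
 *	int single_step = !!(regs->eflags & X86_EFLAGS_TF);
 */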

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"orq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (mask)
		:"ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"andq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (~mask)
		:"ax");
}
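
/*
 * Illustrative example (not part of the original header): boot-time code
 * that wants a CR4 feature goes through set_in_cr4() so the bit is also
 * remembered in mmu_cr4_features for CPUs that come up later.  A sketch
 * only:
 *
 *	set_in_cr4(X86_CR4_OSFXSR);	(enable fxsave/fxrstor support)
 *	clear_in_cr4(X86_CR4_TSD);	(allow rdtsc from user space again)
 */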


/*
 * User space process size. 47 bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)

#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000

struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__ ((aligned (16)));

union i387_union {
	struct i387_fxsave_struct	fxsave;
};

struct tss_struct {
	u32 reserved1;
	u64 rsp0;
	u64 rsp1;
	u64 rsp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit. Thus we have:
	 *
	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
	 * 8 bytes, for an extra "long" of ~0UL
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;
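
/*
 * Worked sizing, for reference (not part of the original header):
 * IO_BITMAP_BITS is 65536 ports, so IO_BITMAP_BYTES = 65536 / 8 = 8192
 * bytes and IO_BITMAP_LONGS = 8192 / 8 = 1024 longs on x86-64; the "+ 1"
 * above is the trailing all-ones long covering the extra byte the CPU may
 * read past the end of the bitmap.
 */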

extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct,init_tss);
/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long ist[7];
};
DECLARE_PER_CPU(struct orig_ist, orig_ist);

#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

struct thread_struct {
	unsigned long	rsp0;
	unsigned long	rsp;
	unsigned long	userrsp;	/* Copy from PDA */
	unsigned long	fs;
	unsigned long	gs;
	unsigned short	es, ds, fsindex, gsindex;
/* Hardware debugging registers */
	unsigned long	debugreg0;
	unsigned long	debugreg1;
	unsigned long	debugreg2;
	unsigned long	debugreg3;
	unsigned long	debugreg6;
	unsigned long	debugreg7;
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387  __attribute__((aligned(16)));
/* IO permissions. the bitmap could be moved into the GDT, that would make
   switch faster for a limited number of ioperm using tasks. -AK */
	int		ioperm;
	unsigned long	*io_bitmap_ptr;
	unsigned io_bitmap_max;
/* cached TLS descriptors. */
	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
} __attribute__((aligned(16)));

#define INIT_THREAD  { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define start_thread(regs,new_rip,new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	\
	load_gs_index(0);							\
	(regs)->rip = (new_rip);						\
	(regs)->rsp = (new_rsp);						\
	write_pda(oldrsp, (new_rsp));						\
	(regs)->cs = __USER_CS;							\
	(regs)->ss = __USER_DS;							\
	(regs)->eflags = 0x200;							\
	set_fs(USER_DS);							\
} while(0)

#define get_debugreg(var, register)				\
		__asm__("movq %%db" #register ", %0"		\
			:"=r" (var))
#define set_debugreg(value, register)			\
		__asm__("movq %0,%%db" #register		\
			: /* no output */			\
			:"r" (value))
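
/*
 * Illustrative example (not part of the original header): the debug
 * register number is a literal token because it is pasted into the
 * instruction by the macros above.  A sketch only:
 *
 *	unsigned long dr6;
 *	get_debugreg(dr6, 6);	(read %db6 into dr6)
 *	set_debugreg(0UL, 7);	(clear the breakpoint enables in %db7)
 */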

struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))

extern unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */


struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};


#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8

/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

#define ASM_NOP_MAX 8

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

#define cpu_has_fpu 1

#define ARCH_HAS_PREFETCH
static inline void prefetch(void *x)
{
	asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

#define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define ARCH_HAS_SPINLOCK_PREFETCH 1

#define spin_lock_prefetch(x)  prefetchw(x)

#define cpu_relax()   rep_nop()
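
/*
 * Illustrative example (not part of the original header): cpu_relax()
 * is intended for tight polling loops; the PAUSE hint it emits reduces
 * power use and lets a hyperthreaded sibling make progress.  A sketch
 * only, with "flag" a hypothetical volatile int being polled:
 *
 *	while (!flag)
 *		cpu_relax();
 */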

/*
 *      NSC/Cyrix CPU configuration register indexes
 */
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 *      NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
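
/*
 * Illustrative example (not part of the original header): the indexed
 * Cyrix registers are normally updated read-modify-write through the
 * 0x22/0x23 port pair.  A sketch only, or-ing a hypothetical bit into
 * CCR3:
 *
 *	unsigned char ccr3 = getCx86(CX86_CCR3);
 *	setCx86(CX86_CCR3, ccr3 | 0x80);
 */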

static inline void serialize_cpu(void)
{
	__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
}

static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		"sti; .byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
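
/*
 * Illustrative example (not part of the original header): the usual
 * idle-loop pattern arms MONITOR on a flag's cache line and then MWAITs
 * until that line is written or an interrupt arrives.  A sketch only,
 * with "flag" standing in for whatever the caller actually monitors:
 *
 *	__monitor(&flag, 0, 0);
 *	if (!flag)
 *		__mwait(0, 0);
 */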

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

#define stack_current() \
({								\
	struct thread_info *ti;					\
	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->task;						\
})

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
/* Boot loader type from the setup header */
extern int bootloader_type;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1

#endif /* __ASM_X86_64_PROCESSOR_H */