#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/system.h>
#include <linux/cpumask.h>
#include <linux/cache.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	asm volatile("mov $1f, %0\n1:" : "=r" (pc));
	return pc;
}
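
/*
 * Illustrative use only (not part of the original header): dumping the
 * current instruction pointer while debugging. printk comes from the
 * surrounding kernel environment.
 *
 *	printk(KERN_DEBUG "executing near %p\n", current_text_addr());
 */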

#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8 x86;		/* CPU family */
	__u8 x86_vendor;	/* CPU vendor */
	__u8 x86_model;
	__u8 x86_mask;
#ifdef CONFIG_X86_32
	char wp_works_ok;	/* It doesn't on 386's */
	char hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	char hard_math;
	char rfu;
	char fdiv_bug;
	char f00f_bug;
	char coma_bug;
	char pad0;
#else
	/* Number of 4K pages in DTLB and ITLB combined: */
	int x86_tlbsize;
	__u8 x86_virt_bits, x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8 x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32 extended_cpuid_level;
#endif
	int cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32 x86_capability[NCAPINTS];
	char x86_vendor_id[16];
	char x86_model_id[64];
	int x86_cache_size;	/* in KB - valid for CPUs that support this call */
	int x86_cache_alignment;	/* in bytes */
	int x86_power;
	unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* CPUs sharing the last level cache */
#endif
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char apicid;
	unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
	unsigned char booted_cores;	/* number of cores as seen by OS */
	__u8 phys_proc_id;	/* Physical processor id */
	__u8 cpu_core_id;	/* Core id */
	__u8 cpu_index;		/* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif
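
/*
 * Illustrative use only (not part of the original header): reading
 * fields of a CPU's cpuinfo_x86 through the accessors above. The
 * locals are hypothetical.
 *
 *	__u8 family  = current_cpu_data.x86;
 *	int cache_kb = cpu_data(0).x86_cache_size;
 */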

void cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
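
/*
 * Illustrative use only (not part of the original header): a context
 * switch loads the physical address of the next mm's page global
 * directory this way. "next" is a hypothetical mm_struct pointer.
 *
 *	load_cr3(next->pgd);
 */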
Glauber de Oliveira Costac758ecf2008-01-30 13:31:03 +0100147
Thomas Gleixner96a388d2007-10-11 11:20:03 +0200148#ifdef CONFIG_X86_32
Glauber de Oliveira Costaca241c72008-01-30 13:31:31 +0100149/* This is the TSS defined by the hardware. */
150struct x86_hw_tss {
151 unsigned short back_link, __blh;
152 unsigned long sp0;
153 unsigned short ss0, __ss0h;
154 unsigned long sp1;
155 unsigned short ss1, __ss1h; /* ss1 caches MSR_IA32_SYSENTER_CS */
156 unsigned long sp2;
157 unsigned short ss2, __ss2h;
158 unsigned long __cr3;
159 unsigned long ip;
160 unsigned long flags;
161 unsigned long ax, cx, dx, bx;
162 unsigned long sp, bp, si, di;
163 unsigned short es, __esh;
164 unsigned short cs, __csh;
165 unsigned short ss, __ssh;
166 unsigned short ds, __dsh;
167 unsigned short fs, __fsh;
168 unsigned short gs, __gsh;
169 unsigned short ldt, __ldth;
170 unsigned short trace, io_bitmap_base;
171} __attribute__((packed));
172#else
173struct x86_hw_tss {
174 u32 reserved1;
175 u64 sp0;
176 u64 sp1;
177 u64 sp2;
178 u64 reserved2;
179 u64 ist[7];
180 u32 reserved3;
181 u32 reserved4;
182 u16 reserved5;
183 u16 io_bitmap_base;
184} __attribute__((packed)) ____cacheline_aligned;
185#endif
186
187/*
188 * Size of io_bitmap.
189 */
190#define IO_BITMAP_BITS 65536
191#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
192#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
193#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
194#define INVALID_IO_BITMAP_OFFSET 0x8000
195#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
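
/*
 * Worked out (editorial note, not in the original header): 65536 bits
 * cover all 2^16 I/O ports, i.e. 65536/8 = 8192 bytes, which is 2048
 * longs on 32-bit (4-byte long) or 1024 longs on 64-bit (8-byte long).
 */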

struct tss_struct {
	struct x86_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * Pads the TSS to be cacheline-aligned (size is 0x100):
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack:
	 */
	unsigned long stack[64];
} __attribute__((packed));

DECLARE_PER_CPU(struct tss_struct, init_tss);

/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long ist[7];
};

#ifdef CONFIG_X86_32
# include "processor_32.h"
#else
# include "processor_64.h"
#endif
struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long sp0;
	unsigned long sp;
#ifdef CONFIG_X86_32
	unsigned long sysenter_cs;
#else
	unsigned long usersp;	/* Copy from PDA */
	unsigned short es, ds, fsindex, gsindex;
#endif
	unsigned long ip;
	unsigned long fs;
	unsigned long gs;
	/* Hardware debugging registers: */
	unsigned long debugreg0;
	unsigned long debugreg1;
	unsigned long debugreg2;
	unsigned long debugreg3;
	unsigned long debugreg6;
	unsigned long debugreg7;
	/* Fault info: */
	unsigned long cr2, trap_no, error_code;
	/* Floating point info: */
	union i387_union i387 __attribute__((aligned(16)));
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info: */
	struct vm86_struct __user *vm86_info;
	unsigned long screen_bitmap;
	unsigned long v86flags, v86mask, saved_sp0;
	unsigned int saved_fs, saved_gs;
#endif
	/* IO permissions: */
	unsigned long *io_bitmap_ptr;
	unsigned long iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set: */
	unsigned long debugctlmsr;
	/*
	 * Debug Store: if non-zero, points to a DS Save Area configuration;
	 * the value goes into MSR_IA32_DS_AREA.
	 */
	unsigned long ds_area_msr;
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" : /* no output */ : "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" : /* no output */ : "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" : /* no output */ : "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" : /* no output */ : "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" : /* no output */ : "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" : /* no output */ : "r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from the given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
			      : "=&r" (reg)
			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
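
/*
 * Illustrative use only (not part of the original header): saving and
 * restoring debug register 7. "dr7" is a hypothetical local.
 *
 *	unsigned long dr7;
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */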

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us can
 * get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
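
/*
 * Illustrative use only (not part of the original header): enabling
 * global pages in CR4 and recording the bit in mmu_cr4_features so
 * CPUs that boot later inherit it. X86_CR4_PGE comes from
 * <asm/processor-flags.h>.
 *
 *	set_in_cr4(X86_CR4_PGE);
 */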

struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* The microcode format was extended starting with Prescott processors. */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

typedef struct {
	unsigned long seg;
} mm_segment_t;

/*
 * Create a kernel thread without removing it from tasklists.
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state. */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
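
/*
 * Illustrative use only (not part of the original header): CPUID leaf 4
 * (deterministic cache parameters) takes the cache index in ecx; bits
 * 4:0 of eax are the cache type, 0 meaning "no more caches". The
 * locals are hypothetical.
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	if ((eax & 0x1f) == 0)
 *		return;
 */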

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
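
/*
 * Illustrative use only (not part of the original header): leaf 1
 * returns feature flags in ecx/edx; on CPUs that report it, bit 0 of
 * ecx is SSE3. The flag below is hypothetical:
 *
 *	int has_sse3 = cpuid_ecx(1) & (1 << 0);
 */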

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep; nop" : : : "memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

#define cpu_relax() rep_nop()
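
/*
 * Illustrative use only (not part of the original header): the intended
 * shape of a busy-wait loop. "flag" is a hypothetical variable written
 * by another CPU.
 *
 *	while (!flag)
 *		cpu_relax();
 */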

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     : : "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     : : "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "sti; mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     : : "a" (eax), "c" (ecx));
}
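
/*
 * Illustrative use only (not part of the original header): the usual
 * MONITOR/MWAIT pairing. The armed address must be re-checked between
 * __monitor() and __mwait() to avoid missing a wakeup; "flag" is
 * hypothetical.
 *
 *	__monitor(&flag, 0, 0);
 *	if (!flag)
 *		__mwait(0, 0);
 */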

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

/*
 * From the system description table in the BIOS. Mostly for MCA use,
 * but others may find it useful.
 */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

extern char ignore_fpu_irq;

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
598
Glauber de Oliveira Costaae2e15e2008-01-30 13:31:40 +0100599#ifdef CONFIG_X86_32
600#define BASE_PREFETCH ASM_NOP4
601#define ARCH_HAS_PREFETCH
602#else
603#define BASE_PREFETCH "prefetcht0 (%1)"
604#endif
605
606/* Prefetch instructions for Pentium III and AMD Athlon */
607/* It's not worth to care about 3dnow! prefetches for the K6
608 because they are microcoded there and very slow.
609 However we don't do prefetches for pre XP Athlons currently
610 That should be fixed. */
611static inline void prefetch(const void *x)
612{
613 alternative_input(BASE_PREFETCH,
614 "prefetchnta (%1)",
615 X86_FEATURE_XMM,
616 "r" (x));
617}
618
619/* 3dnow! prefetch to get an exclusive cache line. Useful for
620 spinlocks to avoid one state transition in the cache coherency protocol. */
621static inline void prefetchw(const void *x)
622{
623 alternative_input(BASE_PREFETCH,
624 "prefetchw (%1)",
625 X86_FEATURE_3DNOW,
626 "r" (x));
627}
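
/*
 * Illustrative use only (not part of the original header): prefetching
 * the next node while working on the current one, so the memory load
 * overlaps the work. "node" and process() are hypothetical.
 *
 *	for (; node; node = node->next) {
 *		prefetch(node->next);
 *		process(node);
 *	}
 */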

#define spin_lock_prefetch(x)	prefetchw(x)

/*
 * This decides where the kernel will search for a free chunk of VM
 * space during mmaps.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

#endif /* __ASM_X86_PROCESSOR_H */