/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
9
H. Peter Anvin1965aae2008-10-22 22:26:29 -070010#ifndef _ASM_X86_I387_H
11#define _ASM_X86_I387_H
Roland McGrath1eeaed72008-01-30 13:31:51 +010012
Herbert Xu3b0d6592009-11-03 09:11:15 -050013#ifndef __ASSEMBLY__
14
Roland McGrath1eeaed72008-01-30 13:31:51 +010015#include <linux/sched.h>
16#include <linux/kernel_stat.h>
17#include <linux/regset.h>
Suresh Siddhae4914012008-08-13 22:02:26 +100018#include <linux/hardirq.h>
Avi Kivity86603282010-05-06 11:45:46 +030019#include <linux/slab.h>
H. Peter Anvin92c37fa2008-02-04 16:47:58 +010020#include <asm/asm.h>
H. Peter Anvinc9775b42010-05-11 17:49:54 -070021#include <asm/cpufeature.h>
Roland McGrath1eeaed72008-01-30 13:31:51 +010022#include <asm/processor.h>
23#include <asm/sigcontext.h>
24#include <asm/user.h>
25#include <asm/uaccess.h>
Suresh Siddhadc1e35c2008-07-29 10:29:19 -070026#include <asm/xsave.h>
Roland McGrath1eeaed72008-01-30 13:31:51 +010027
Suresh Siddha3c1c7f12008-07-29 10:29:21 -070028extern unsigned int sig_xstate_size;
Roland McGrath1eeaed72008-01-30 13:31:51 +010029extern void fpu_init(void);
Roland McGrath1eeaed72008-01-30 13:31:51 +010030extern void mxcsr_feature_mask_init(void);
Suresh Siddhaaa283f42008-03-10 15:28:05 -070031extern int init_fpu(struct task_struct *child);
Roland McGrath1eeaed72008-01-30 13:31:51 +010032extern asmlinkage void math_state_restore(void);
Jeremy Fitzhardingee6e9cac2009-04-24 00:40:59 -070033extern void __math_state_restore(void);
Jaswinder Singh36454932008-07-21 22:31:57 +053034extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
Roland McGrath1eeaed72008-01-30 13:31:51 +010035
36extern user_regset_active_fn fpregs_active, xfpregs_active;
Suresh Siddha5b3efd52010-02-11 11:50:59 -080037extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
38 xstateregs_get;
39extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
40 xstateregs_set;
41
42/*
43 * xstateregs_active == fpregs_active. Please refer to the comment
44 * at the definition of fpregs_active.
45 */
46#define xstateregs_active fpregs_active
Roland McGrath1eeaed72008-01-30 13:31:51 +010047
extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

Suresh Siddha29104e12010-07-19 16:05:49 -070060static __always_inline __pure bool use_xsaveopt(void)
61{
Suresh Siddha6bad06b2010-07-19 16:05:52 -070062 return static_cpu_has(X86_FEATURE_XSAVEOPT);
Suresh Siddha29104e12010-07-19 16:05:49 -070063}
64
H. Peter Anvinc9775b42010-05-11 17:49:54 -070065static __always_inline __pure bool use_xsave(void)
Avi Kivityc9ad4882010-05-06 11:45:45 +030066{
H. Peter Anvinc9775b42010-05-11 17:49:54 -070067 return static_cpu_has(X86_FEATURE_XSAVE);
Avi Kivityc9ad4882010-05-06 11:45:45 +030068}
69
extern void __sanitize_i387_state(struct task_struct *);

/*
 * With xsaveopt, components left in their init state may not have been
 * written to the task's xstate buffer; sanitize the buffer before any
 * code reads it directly.  A no-op unless xsaveopt is in use.
 */
static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

Roland McGrath1eeaed72008-01-30 13:31:51 +010079#ifdef CONFIG_X86_64
Suresh Siddhab359e8a2008-07-29 10:29:20 -070080static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
Roland McGrath1eeaed72008-01-30 13:31:51 +010081{
82 int err;
83
Brian Gerst82024132010-09-03 21:17:14 -040084 /* See comment in fxsave() below. */
Roland McGrath1eeaed72008-01-30 13:31:51 +010085 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
86 "2:\n"
87 ".section .fixup,\"ax\"\n"
88 "3: movl $-1,%[err]\n"
89 " jmp 2b\n"
90 ".previous\n"
Joe Perchesaffe6632008-03-23 01:02:18 -070091 _ASM_EXTABLE(1b, 3b)
Roland McGrath1eeaed72008-01-30 13:31:51 +010092 : [err] "=r" (err)
Brian Gerst82024132010-09-03 21:17:14 -040093 : [fx] "R" (fx), "m" (*fx), "0" (0));
Roland McGrath1eeaed72008-01-30 13:31:51 +010094 return err;
95}
96
Roland McGrath1eeaed72008-01-30 13:31:51 +010097/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
98 is pending. Clear the x87 state here by setting it to fixed
99 values. The kernel data segment can be sometimes 0 and sometimes
100 new user value. Both should be ok.
101 Use the PDA as safe address because it should be already in L1. */
Avi Kivity86603282010-05-06 11:45:46 +0300102static inline void fpu_clear(struct fpu *fpu)
Roland McGrath1eeaed72008-01-30 13:31:51 +0100103{
Avi Kivity86603282010-05-06 11:45:46 +0300104 struct xsave_struct *xstate = &fpu->state->xsave;
105 struct i387_fxsave_struct *fx = &fpu->state->fxsave;
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700106
107 /*
108 * xsave header may indicate the init state of the FP.
109 */
Avi Kivityc9ad4882010-05-06 11:45:45 +0300110 if (use_xsave() &&
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700111 !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
112 return;
113
Roland McGrath1eeaed72008-01-30 13:31:51 +0100114 if (unlikely(fx->swd & X87_FSW_ES))
Joe Perchesaffe6632008-03-23 01:02:18 -0700115 asm volatile("fnclex");
Roland McGrath1eeaed72008-01-30 13:31:51 +0100116 alternative_input(ASM_NOP8 ASM_NOP2,
Joe Perchesaffe6632008-03-23 01:02:18 -0700117 " emms\n" /* clear stack tags */
118 " fildl %%gs:0", /* load to clear state */
119 X86_FEATURE_FXSAVE_LEAK);
Roland McGrath1eeaed72008-01-30 13:31:51 +0100120}
121
Avi Kivity86603282010-05-06 11:45:46 +0300122static inline void clear_fpu_state(struct task_struct *tsk)
123{
124 fpu_clear(&tsk->thread.fpu);
125}
126
Suresh Siddhac37b5ef2008-07-29 10:29:25 -0700127static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
Roland McGrath1eeaed72008-01-30 13:31:51 +0100128{
129 int err;
130
Suresh Siddha8e221b62010-06-22 16:23:37 -0700131 /*
132 * Clear the bytes not touched by the fxsave and reserved
133 * for the SW usage.
134 */
135 err = __clear_user(&fx->sw_reserved,
136 sizeof(struct _fpx_sw_bytes));
137 if (unlikely(err))
138 return -EFAULT;
139
Brian Gerst82024132010-09-03 21:17:14 -0400140 /* See comment in fxsave() below. */
Roland McGrath1eeaed72008-01-30 13:31:51 +0100141 asm volatile("1: rex64/fxsave (%[fx])\n\t"
142 "2:\n"
143 ".section .fixup,\"ax\"\n"
144 "3: movl $-1,%[err]\n"
145 " jmp 2b\n"
146 ".previous\n"
Joe Perchesaffe6632008-03-23 01:02:18 -0700147 _ASM_EXTABLE(1b, 3b)
Roland McGrath1eeaed72008-01-30 13:31:51 +0100148 : [err] "=r" (err), "=m" (*fx)
Brian Gerst82024132010-09-03 21:17:14 -0400149 : [fx] "R" (fx), "0" (0));
Joe Perchesaffe6632008-03-23 01:02:18 -0700150 if (unlikely(err) &&
151 __clear_user(fx, sizeof(struct i387_fxsave_struct)))
Roland McGrath1eeaed72008-01-30 13:31:51 +0100152 err = -EFAULT;
153 /* No need to clear here because the caller clears USED_MATH */
154 return err;
155}
156
Avi Kivity86603282010-05-06 11:45:46 +0300157static inline void fpu_fxsave(struct fpu *fpu)
Roland McGrath1eeaed72008-01-30 13:31:51 +0100158{
159 /* Using "rex64; fxsave %0" is broken because, if the memory operand
160 uses any extended registers for addressing, a second REX prefix
161 will be generated (to the assembler, rex64 followed by semicolon
Brian Gerst82024132010-09-03 21:17:14 -0400162 is a separate instruction), and hence the 64-bitness is lost.
163 Using "fxsaveq %0" would be the ideal choice, but is only supported
164 starting with gas 2.16.
165 asm volatile("fxsaveq %0"
166 : "=m" (fpu->state->fxsave));
167 Using, as a workaround, the properly prefixed form below isn't
Roland McGrath1eeaed72008-01-30 13:31:51 +0100168 accepted by any binutils version so far released, complaining that
169 the same type of prefix is used twice if an extended register is
Brian Gerst82024132010-09-03 21:17:14 -0400170 needed for addressing (fix submitted to mainline 2005-11-21).
171 asm volatile("rex64/fxsave %0"
172 : "=m" (fpu->state->fxsave));
173 This, however, we can work around by forcing the compiler to select
Roland McGrath1eeaed72008-01-30 13:31:51 +0100174 an addressing mode that doesn't require extended registers. */
Brian Gerst82024132010-09-03 21:17:14 -0400175 asm volatile("rex64/fxsave (%[fx])"
176 : "=m" (fpu->state->fxsave)
177 : [fx] "R" (&fpu->state->fxsave));
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700178}
179
/*
 * Save the current FPU state into @fpu (xsave when available, else
 * fxsave) and then scrub the leak-prone x87 FDP/FIP/FOP registers.
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave())
		fpu_xsave(fpu);
	else
		fpu_fxsave(fpu);

	fpu_clear(fpu);
}

Roland McGrath1eeaed72008-01-30 13:31:51 +0100190#else /* CONFIG_X86_32 */
191
#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
/* No-op when the x87 software emulator is not built in. */
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

Jiri Slaby34ba4762009-04-08 13:31:59 +0200198/* perform fxrstor iff the processor has extended states, otherwise frstor */
199static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
Roland McGrath1eeaed72008-01-30 13:31:51 +0100200{
201 /*
202 * The "nop" is needed to make the instructions the same
203 * length.
204 */
205 alternative_input(
206 "nop ; frstor %1",
207 "fxrstor %1",
208 X86_FEATURE_FXSR,
Jiri Slaby34ba4762009-04-08 13:31:59 +0200209 "m" (*fx));
210
Jiri Slabyfcb2ac52009-04-08 13:31:58 +0200211 return 0;
Roland McGrath1eeaed72008-01-30 13:31:51 +0100212}
213
/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

223/*
224 * These must be called with preempt disabled
225 */
Avi Kivity86603282010-05-06 11:45:46 +0300226static inline void fpu_save_init(struct fpu *fpu)
Roland McGrath1eeaed72008-01-30 13:31:51 +0100227{
Avi Kivityc9ad4882010-05-06 11:45:45 +0300228 if (use_xsave()) {
Avi Kivity86603282010-05-06 11:45:46 +0300229 struct xsave_struct *xstate = &fpu->state->xsave;
230 struct i387_fxsave_struct *fx = &fpu->state->fxsave;
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700231
Avi Kivity86603282010-05-06 11:45:46 +0300232 fpu_xsave(fpu);
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700233
234 /*
235 * xsave header may indicate the init state of the FP.
236 */
237 if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
238 goto end;
239
240 if (unlikely(fx->swd & X87_FSW_ES))
241 asm volatile("fnclex");
242
243 /*
244 * we can do a simple return here or be paranoid :)
245 */
246 goto clear_state;
247 }
248
Roland McGrath1eeaed72008-01-30 13:31:51 +0100249 /* Use more nops than strictly needed in case the compiler
250 varies code */
251 alternative_input(
252 "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
253 "fxsave %[fx]\n"
254 "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
255 X86_FEATURE_FXSR,
Avi Kivity86603282010-05-06 11:45:46 +0300256 [fx] "m" (fpu->state->fxsave),
257 [fsw] "m" (fpu->state->fxsave.swd) : "memory");
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700258clear_state:
Roland McGrath1eeaed72008-01-30 13:31:51 +0100259 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
260 is pending. Clear the x87 state here by setting it to fixed
261 values. safe_address is a random variable that should be in L1 */
262 alternative_input(
263 GENERIC_NOP8 GENERIC_NOP2,
264 "emms\n\t" /* clear stack tags */
265 "fildl %[addr]", /* set F?P to defined value */
266 X86_FEATURE_FXSAVE_LEAK,
267 [addr] "m" (safe_address));
Suresh Siddhab359e8a2008-07-29 10:29:20 -0700268end:
Avi Kivity86603282010-05-06 11:45:46 +0300269 ;
270}
271
Brian Gerstbfd946c2010-09-03 21:17:11 -0400272#endif /* CONFIG_X86_64 */
273
Avi Kivity86603282010-05-06 11:45:46 +0300274static inline void __save_init_fpu(struct task_struct *tsk)
275{
276 fpu_save_init(&tsk->thread.fpu);
Roland McGrath1eeaed72008-01-30 13:31:51 +0100277 task_thread_info(tsk)->status &= ~TS_USEDFPU;
278}
279
Avi Kivity86603282010-05-06 11:45:46 +0300280static inline int fpu_fxrstor_checking(struct fpu *fpu)
281{
282 return fxrstor_checking(&fpu->state->fxsave);
283}
284
/* Restore @fpu with xrstor when available, falling back to fxrstor. */
static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

Jiri Slaby34ba4762009-04-08 13:31:59 +0200293static inline int restore_fpu_checking(struct task_struct *tsk)
294{
Avi Kivity86603282010-05-06 11:45:46 +0300295 return fpu_restore_checking(&tsk->thread.fpu);
Jiri Slaby34ba4762009-04-08 13:31:59 +0200296}
297
Roland McGrath1eeaed72008-01-30 13:31:51 +0100298/*
299 * Signal frame handlers...
300 */
Suresh Siddhaab513702008-07-29 10:29:22 -0700301extern int save_i387_xstate(void __user *buf);
302extern int restore_i387_xstate(void __user *buf);
Roland McGrath1eeaed72008-01-30 13:31:51 +0100303
304static inline void __unlazy_fpu(struct task_struct *tsk)
305{
306 if (task_thread_info(tsk)->status & TS_USEDFPU) {
307 __save_init_fpu(tsk);
308 stts();
309 } else
310 tsk->fpu_counter = 0;
311}
312
313static inline void __clear_fpu(struct task_struct *tsk)
314{
315 if (task_thread_info(tsk)->status & TS_USEDFPU) {
Brian Gerst51115d42010-09-03 21:17:10 -0400316 /* Ignore delayed exceptions from user space */
317 asm volatile("1: fwait\n"
318 "2:\n"
319 _ASM_EXTABLE(1b, 2b));
Roland McGrath1eeaed72008-01-30 13:31:51 +0100320 task_thread_info(tsk)->status &= ~TS_USEDFPU;
321 stts();
322 }
323}
324
325static inline void kernel_fpu_begin(void)
326{
327 struct thread_info *me = current_thread_info();
328 preempt_disable();
329 if (me->status & TS_USEDFPU)
330 __save_init_fpu(me->task);
331 else
332 clts();
333}
334
/* End a kernel_fpu_begin() section: re-arm CR0.TS, re-enable preemption. */
static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}

Huang Yingae4b6882009-08-31 13:11:54 +0800341static inline bool irq_fpu_usable(void)
342{
343 struct pt_regs *regs;
344
345 return !in_interrupt() || !(regs = get_irq_regs()) || \
346 user_mode(regs) || (read_cr0() & X86_CR0_TS);
347}
348
Suresh Siddhae4914012008-08-13 22:02:26 +1000349/*
350 * Some instructions like VIA's padlock instructions generate a spurious
351 * DNA fault but don't modify SSE registers. And these instructions
Chuck Ebbert0b8c3d52009-06-09 10:40:50 -0400352 * get used from interrupt context as well. To prevent these kernel instructions
353 * in interrupt context interacting wrongly with other user/kernel fpu usage, we
Suresh Siddhae4914012008-08-13 22:02:26 +1000354 * should use them only in the context of irq_ts_save/restore()
355 */
356static inline int irq_ts_save(void)
357{
358 /*
Chuck Ebbert0b8c3d52009-06-09 10:40:50 -0400359 * If in process context and not atomic, we can take a spurious DNA fault.
360 * Otherwise, doing clts() in process context requires disabling preemption
361 * or some heavy lifting like kernel_fpu_begin()
Suresh Siddhae4914012008-08-13 22:02:26 +1000362 */
Chuck Ebbert0b8c3d52009-06-09 10:40:50 -0400363 if (!in_atomic())
Suresh Siddhae4914012008-08-13 22:02:26 +1000364 return 0;
365
366 if (read_cr0() & X86_CR0_TS) {
367 clts();
368 return 1;
369 }
370
371 return 0;
372}
373
/* Undo irq_ts_save(): re-set CR0.TS iff it was set before. */
static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}

/* Preemption-safe wrapper around __unlazy_fpu(). */
static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}

/* Preemption-safe wrapper around __clear_fpu(). */
static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

Roland McGrath1eeaed72008-01-30 13:31:51 +0100405/*
Roland McGrath1eeaed72008-01-30 13:31:51 +0100406 * i387 state interaction
407 */
408static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
409{
410 if (cpu_has_fxsr) {
Avi Kivity86603282010-05-06 11:45:46 +0300411 return tsk->thread.fpu.state->fxsave.cwd;
Roland McGrath1eeaed72008-01-30 13:31:51 +0100412 } else {
Avi Kivity86603282010-05-06 11:45:46 +0300413 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
Roland McGrath1eeaed72008-01-30 13:31:51 +0100414 }
415}
416
417static inline unsigned short get_fpu_swd(struct task_struct *tsk)
418{
419 if (cpu_has_fxsr) {
Avi Kivity86603282010-05-06 11:45:46 +0300420 return tsk->thread.fpu.state->fxsave.swd;
Roland McGrath1eeaed72008-01-30 13:31:51 +0100421 } else {
Avi Kivity86603282010-05-06 11:45:46 +0300422 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
Roland McGrath1eeaed72008-01-30 13:31:51 +0100423 }
424}
425
426static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
427{
428 if (cpu_has_xmm) {
Avi Kivity86603282010-05-06 11:45:46 +0300429 return tsk->thread.fpu.state->fxsave.mxcsr;
Roland McGrath1eeaed72008-01-30 13:31:51 +0100430 } else {
431 return MXCSR_DEFAULT;
432 }
433}
434
Avi Kivity86603282010-05-06 11:45:46 +0300435static bool fpu_allocated(struct fpu *fpu)
436{
437 return fpu->state != NULL;
438}
439
440static inline int fpu_alloc(struct fpu *fpu)
441{
442 if (fpu_allocated(fpu))
443 return 0;
444 fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
445 if (!fpu->state)
446 return -ENOMEM;
447 WARN_ON((unsigned long)fpu->state & 15);
448 return 0;
449}
450
451static inline void fpu_free(struct fpu *fpu)
452{
453 if (fpu->state) {
454 kmem_cache_free(task_xstate_cachep, fpu->state);
455 fpu->state = NULL;
456 }
457}
458
459static inline void fpu_copy(struct fpu *dst, struct fpu *src)
460{
461 memcpy(dst->state, src->state, xstate_size);
462}
463
Sheng Yang5ee481d2010-05-17 17:22:23 +0800464extern void fpu_finit(struct fpu *fpu);
465
Herbert Xu3b0d6592009-11-03 09:11:15 -0500466#endif /* __ASSEMBLY__ */
467
468#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
469#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5
470
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700471#endif /* _ASM_X86_I387_H */