/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern void init_thread_xstate(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

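/*
 * use_xsave() compiles to a jump patched at boot via static_cpu_has(),
 * so choosing between the XSAVE and FXSAVE paths below costs no runtime
 * feature test once the alternatives have been applied.
 */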
static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

#ifdef CONFIG_X86_64

/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
	asm volatile("1: fwait\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b));
}

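/*
 * Restore the FXSAVE image at *fx.  Returns 0 on success and -1 if
 * fxrstor faults; the fault is caught by the exception table entry and
 * redirected to the fixup code.
 */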
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
#if 0 /* See comment in fpu_fxsave() below. */
		     : [fx] "r" (fx), "m" (*fx), "0" (0));
#else
		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. The kernel data segment can sometimes be 0 and sometimes
   the new user value. Both should be ok.
   Use the PDA as a safe address because it should already be in L1. */
static inline void fpu_clear(struct fpu *fpu)
{
	struct xsave_struct *xstate = &fpu->state->xsave;
	struct i387_fxsave_struct *fx = &fpu->state->fxsave;

	/*
	 * The xsave header may indicate the init state of the FP.
	 */
	if (use_xsave() &&
	    !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
		return;

	if (unlikely(fx->swd & X87_FSW_ES))
		asm volatile("fnclex");
	alternative_input(ASM_NOP8 ASM_NOP2,
			  " emms\n"		/* clear stack tags */
			  " fildl %%gs:0",	/* load to clear state */
			  X86_FEATURE_FXSAVE_LEAK);
}

static inline void clear_fpu_state(struct task_struct *tsk)
{
	fpu_clear(&tsk->thread.fpu);
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in fpu_fxsave() below. */
		     : [fx] "r" (fx), "0" (0));
#else
		     : [fx] "cdaSDb" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */
#if 0
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#elif 0
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21). */
	__asm__ __volatile__("rex64/fxsave %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	__asm__ __volatile__("rex64/fxsave (%1)"
			     : "=m" (fpu->state->fxsave)
			     : "cdaSDb" (&fpu->state->fxsave));
#endif
}

static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave())
		fpu_xsave(fpu);
	else
		fpu_fxsave(fpu);

	fpu_clear(fpu);
}

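/*
 * Save the current FPU state and mark the FPU as not in use.  Callers
 * are expected to set CR0.TS afterwards (as __unlazy_fpu() below does)
 * so that the next FP instruction faults into math_state_restore().
 */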
static inline void __save_init_fpu(struct task_struct *tsk)
{
	fpu_save_init(&tsk->thread.fpu);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#else  /* CONFIG_X86_32 */

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline void tolerant_fwait(void)
{
	asm volatile("fnclex ; fwait");
}

/* Perform fxrstor if the processor has extended states, otherwise frstor.
   Note that unlike its 64-bit counterpart this version does no fault
   checking and always returns 0. */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		struct xsave_struct *xstate = &fpu->state->xsave;
		struct i387_fxsave_struct *fx = &fpu->state->fxsave;

		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
			goto end;

		if (unlikely(fx->swd & X87_FSW_ES))
			asm volatile("fnclex");

		/*
		 * we can do a simple return here or be paranoid :)
		 */
		goto clear_state;
	}

	/* Use more nops than strictly needed in case the compiler
	   varies code */
	alternative_input(
		"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
		"fxsave %[fx]\n"
		"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
		X86_FEATURE_FXSR,
		[fx] "m" (fpu->state->fxsave),
		[fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending. Clear the x87 state here by setting it to fixed
	   values. safe_address is a random variable that should be in L1 */
	alternative_input(
		GENERIC_NOP8 GENERIC_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
end:
	;
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
	fpu_save_init(&tsk->thread.fpu);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#endif	/* CONFIG_X86_64 */

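/*
 * Restore helpers.  These return 0 on success; on 64-bit a faulting
 * restore (e.g. a corrupted signal frame) is reported as non-zero,
 * while the 32-bit fxrstor_checking() performs no checking.
 */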
static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

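/*
 * __unlazy_fpu() flushes a task's live register state back to memory and
 * sets CR0.TS so the next FP use traps.  tsk->fpu_counter is the lazy
 * restore heuristic (roughly, consecutive context switches in which the
 * task used the FPU); it is reset when the task was not using the FPU.
 */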
static inline void __unlazy_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		__save_init_fpu(tsk);
		stts();
	} else
		tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		tolerant_fwait();
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}

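/*
 * kernel_fpu_begin()/kernel_fpu_end() bracket kernel-mode use of FP/SSE
 * registers.  Preemption stays disabled in between, so the section must
 * not sleep.  Illustrative use:
 *
 *	kernel_fpu_begin();
 *	... FP or SSE instructions ...
 *	kernel_fpu_end();
 */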
static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();
	preempt_disable();
	if (me->status & TS_USEDFPU)
		__save_init_fpu(me->task);
	else
		clts();
}

static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}

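/*
 * irq_fpu_usable() reports whether it is safe to touch the FPU from the
 * current context: either we are not in an interrupt, or the interrupted
 * context was not using the FPU in kernel mode (it was in user mode, or
 * CR0.TS was set).
 */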
static inline bool irq_fpu_usable(void)
{
	struct pt_regs *regs;

	return !in_interrupt() || !(regs = get_irq_regs()) || \
		user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions, such as VIA's padlock instructions, generate a
 * spurious DNA fault but don't modify SSE registers. These instructions
 * are also used from interrupt context. To keep them from interacting
 * wrongly with other user/kernel FPU usage, they should only be used
 * bracketed by irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious
	 * DNA fault. Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}

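/*
 * Illustrative pairing (the padlock crypto drivers use this pattern):
 *
 *	int ts = irq_ts_save();
 *	... issue the padlock instruction ...
 *	irq_ts_restore(ts);
 */
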
#ifdef CONFIG_X86_64

static inline void save_init_fpu(struct task_struct *tsk)
{
	__save_init_fpu(tsk);
	stts();
}

#define unlazy_fpu	__unlazy_fpu
#define clear_fpu	__clear_fpu

#else  /* CONFIG_X86_32 */

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

#endif	/* CONFIG_X86_64 */

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

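/*
 * fpu->state is allocated lazily from task_xstate_cachep (xstate_size
 * bytes).  FXSAVE needs a 16-byte aligned buffer, hence the alignment
 * check in fpu_alloc().
 */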
static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif /* __ASSEMBLY__ */

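/*
 * Raw opcode bytes for "pshufb %xmm5, %xmm0" and "pshufb %xmm5, %xmm6",
 * for .S files assembled by binutils versions without SSSE3 support.
 */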
#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5

#endif /* _ASM_X86_I387_H */