/*
 *  linux/arch/i386/kernel/i387.c
 *
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/sched.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/math_emu.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#ifdef CONFIG_MATH_EMULATION
#define HAVE_HWFP (boot_cpu_data.hard_math)
#else
#define HAVE_HWFP 1
#endif

static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;

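/*
 * Probe which mxcsr bits the CPU actually implements: execute an fxsave
 * and read back the mxcsr_mask field, falling back to the architectural
 * default of 0x0000ffbf when the CPU reports none.  The resulting mask
 * is used to reject reserved mxcsr bits when restoring user state.
 */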
void mxcsr_feature_mask_init(void)
{
	unsigned long mask = 0;
	clts();
	if (cpu_has_fxsr) {
		memset(&current->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
		asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
		mask = current->thread.i387.fxsave.mxcsr_mask;
		if (mask == 0) mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
	stts();
}

/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions, and then
 * remember that the current task has used the FPU.
 */
void init_fpu(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		memset(&tsk->thread.i387.fxsave, 0, sizeof(struct i387_fxsave_struct));
		tsk->thread.i387.fxsave.cwd = 0x37f;
		if (cpu_has_xmm)
			tsk->thread.i387.fxsave.mxcsr = 0x1f80;
	} else {
		memset(&tsk->thread.i387.fsave, 0, sizeof(struct i387_fsave_struct));
		tsk->thread.i387.fsave.cwd = 0xffff037fu;
		tsk->thread.i387.fsave.swd = 0xffff0000u;
		tsk->thread.i387.fsave.twd = 0xffffffffu;
		tsk->thread.i387.fsave.fos = 0xffff0000u;
	}
	/* only the device not available exception or ptrace can call init_fpu */
	set_stopped_child_used_math(tsk);
}

/*
 * FPU lazy state save handling.
 */

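/*
 * Allow the kernel to use the FPU: if the current task's FPU state is
 * live in the registers, save it first; otherwise just clear TS so FPU
 * instructions do not fault.  Preemption stays disabled until the
 * matching kernel_fpu_end().
 */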
void kernel_fpu_begin(void)
{
	struct thread_info *thread = current_thread_info();

	preempt_disable();
	if (thread->status & TS_USEDFPU) {
		__save_init_fpu(thread->task);
		return;
	}
	clts();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

/*
 * FPU tag word conversions.
 */

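/*
 * Compress the full i387 tag word (2 bits per register) into the FXSR
 * form, which keeps only one bit per register: 1 for valid/zero/special,
 * 0 for empty.
 */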
static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
	return tmp;
}

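/*
 * Expand the FXSR tag word back to the full i387 encoding.  The single
 * valid bit per register is refined to Valid/Zero/Special/Empty by
 * inspecting the saved register contents, taking the top-of-stack field
 * from the status word into account.
 */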
static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave )
{
	struct _fpxreg *st = NULL;
	unsigned long tos = (fxsave->swd >> 11) & 7;
	unsigned long twd = (unsigned long) fxsave->twd;
	unsigned long tag;
	unsigned long ret = 0xffff0000u;
	int i;

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);

	for ( i = 0 ; i < 8 ; i++ ) {
		if ( twd & 0x1 ) {
			st = FPREG_ADDR( fxsave, (i - tos) & 7 );

			switch ( st->exponent & 0x7fff ) {
			case 0x7fff:
				tag = 2;		/* Special */
				break;
			case 0x0000:
				if ( !st->significand[0] &&
				     !st->significand[1] &&
				     !st->significand[2] &&
				     !st->significand[3] ) {
					tag = 1;	/* Zero */
				} else {
					tag = 2;	/* Special */
				}
				break;
			default:
				if ( st->significand[3] & 0x8000 ) {
					tag = 0;	/* Valid */
				} else {
					tag = 2;	/* Special */
				}
				break;
			}
		} else {
			tag = 3;			/* Empty */
		}
		ret |= (tag << (2 * i));
		twd = twd >> 1;
	}
	return ret;
}

/*
 * FPU state interaction.
 */

unsigned short get_fpu_cwd( struct task_struct *tsk )
{
	if ( cpu_has_fxsr ) {
		return tsk->thread.i387.fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.i387.fsave.cwd;
	}
}

unsigned short get_fpu_swd( struct task_struct *tsk )
{
	if ( cpu_has_fxsr ) {
		return tsk->thread.i387.fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.i387.fsave.swd;
	}
}

#if 0
unsigned short get_fpu_twd( struct task_struct *tsk )
{
	if ( cpu_has_fxsr ) {
		return tsk->thread.i387.fxsave.twd;
	} else {
		return (unsigned short)tsk->thread.i387.fsave.twd;
	}
}
#endif  /*  0  */

unsigned short get_fpu_mxcsr( struct task_struct *tsk )
{
	if ( cpu_has_xmm ) {
		return tsk->thread.i387.fxsave.mxcsr;
	} else {
		return 0x1f80;
	}
}

#if 0

void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
{
	if ( cpu_has_fxsr ) {
		tsk->thread.i387.fxsave.cwd = cwd;
	} else {
		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
	}
}

void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
{
	if ( cpu_has_fxsr ) {
		tsk->thread.i387.fxsave.swd = swd;
	} else {
		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
	}
}

void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
{
	if ( cpu_has_fxsr ) {
		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
	} else {
		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
	}
}

#endif  /*  0  */

/*
 * FXSR floating point environment conversions.
 */

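/*
 * Translate the kernel's fxsave image into the legacy user-visible
 * struct _fpstate: a 7-longword environment followed by the eight
 * st(i) registers packed as 10-byte values.
 */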
static int convert_fxsr_to_user( struct _fpstate __user *buf,
				 struct i387_fxsave_struct *fxsave )
{
	unsigned long env[7];
	struct _fpreg __user *to;
	struct _fpxreg *from;
	int i;

	env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
	env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
	env[2] = twd_fxsr_to_i387(fxsave);
	env[3] = fxsave->fip;
	env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
	env[5] = fxsave->foo;
	env[6] = fxsave->fos;

	if ( __copy_to_user( buf, env, 7 * sizeof(unsigned long) ) )
		return 1;

	to = &buf->_st[0];
	from = (struct _fpxreg *) &fxsave->st_space[0];
	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
		unsigned long __user *t = (unsigned long __user *)to;
		unsigned long *f = (unsigned long *)from;

		if (__put_user(*f, t) ||
		    __put_user(*(f + 1), t + 1) ||
		    __put_user(from->exponent, &to->exponent))
			return 1;
	}
	return 0;
}

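/*
 * Reverse direction: rebuild the fxsave image from a user-supplied
 * struct _fpstate, recompressing the tag word along the way.
 */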
static int convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
				   struct _fpstate __user *buf )
{
	unsigned long env[7];
	struct _fpxreg *to;
	struct _fpreg __user *from;
	int i;

	if ( __copy_from_user( env, buf, 7 * sizeof(long) ) )
		return 1;

	fxsave->cwd = (unsigned short)(env[0] & 0xffff);
	fxsave->swd = (unsigned short)(env[1] & 0xffff);
	fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
	fxsave->fip = env[3];
	fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
	fxsave->fcs = (env[4] & 0xffff);
	fxsave->foo = env[5];
	fxsave->fos = env[6];

	to = (struct _fpxreg *) &fxsave->st_space[0];
	from = &buf->_st[0];
	for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
		unsigned long *t = (unsigned long *)to;
		unsigned long __user *f = (unsigned long __user *)from;

		if (__get_user(*t, f) ||
		    __get_user(*(t + 1), f + 1) ||
		    __get_user(to->exponent, &from->exponent))
			return 1;
	}
	return 0;
}

/*
 * Signal frame handlers.
 */

static inline int save_i387_fsave( struct _fpstate __user *buf )
{
	struct task_struct *tsk = current;

	unlazy_fpu( tsk );
	tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
	if ( __copy_to_user( buf, &tsk->thread.i387.fsave,
			     sizeof(struct i387_fsave_struct) ) )
		return -1;
	return 1;
}

static int save_i387_fxsave( struct _fpstate __user *buf )
{
	struct task_struct *tsk = current;
	int err = 0;

	unlazy_fpu( tsk );

	if ( convert_fxsr_to_user( buf, &tsk->thread.i387.fxsave ) )
		return -1;

	err |= __put_user( tsk->thread.i387.fxsave.swd, &buf->status );
	err |= __put_user( X86_FXSR_MAGIC, &buf->magic );
	if ( err )
		return -1;

	if ( __copy_to_user( &buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
			     sizeof(struct i387_fxsave_struct) ) )
		return -1;
	return 1;
}

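/*
 * Save the current task's FPU state into a signal frame.  Returns 0 if
 * the task never used the FPU, -1 on fault, and 1 on success, picking
 * the fxsave, fsave or math-emulation format as appropriate.
 */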
int save_i387( struct _fpstate __user *buf )
{
	if ( !used_math() )
		return 0;

	/* This will cause a "finit" to be triggered by the next
	 * attempted FPU operation by the 'current' process.
	 */
	clear_used_math();

	if ( HAVE_HWFP ) {
		if ( cpu_has_fxsr ) {
			return save_i387_fxsave( buf );
		} else {
			return save_i387_fsave( buf );
		}
	} else {
		return save_i387_soft( &current->thread.i387.soft, buf );
	}
}

static inline int restore_i387_fsave( struct _fpstate __user *buf )
{
	struct task_struct *tsk = current;
	clear_fpu( tsk );
	return __copy_from_user( &tsk->thread.i387.fsave, buf,
				 sizeof(struct i387_fsave_struct) );
}

static int restore_i387_fxsave( struct _fpstate __user *buf )
{
	int err;
	struct task_struct *tsk = current;
	clear_fpu( tsk );
	err = __copy_from_user( &tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
				sizeof(struct i387_fxsave_struct) );
	/* mxcsr reserved bits must be masked to zero for security reasons */
	tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
	return err ? 1 : convert_fxsr_from_user( &tsk->thread.i387.fxsave, buf );
}

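/*
 * Restore FPU state from a signal frame on sigreturn and mark the task
 * as having used the FPU again.
 */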
int restore_i387( struct _fpstate __user *buf )
{
	int err;

	if ( HAVE_HWFP ) {
		if ( cpu_has_fxsr ) {
			err = restore_i387_fxsave( buf );
		} else {
			err = restore_i387_fsave( buf );
		}
	} else {
		err = restore_i387_soft( &current->thread.i387.soft, buf );
	}
	set_used_math();
	return err;
}

/*
 * ptrace request handlers.
 */

static inline int get_fpregs_fsave( struct user_i387_struct __user *buf,
				    struct task_struct *tsk )
{
	return __copy_to_user( buf, &tsk->thread.i387.fsave,
			       sizeof(struct user_i387_struct) );
}

static inline int get_fpregs_fxsave( struct user_i387_struct __user *buf,
				     struct task_struct *tsk )
{
	return convert_fxsr_to_user( (struct _fpstate __user *)buf,
				     &tsk->thread.i387.fxsave );
}

int get_fpregs( struct user_i387_struct __user *buf, struct task_struct *tsk )
{
	if ( HAVE_HWFP ) {
		if ( cpu_has_fxsr ) {
			return get_fpregs_fxsave( buf, tsk );
		} else {
			return get_fpregs_fsave( buf, tsk );
		}
	} else {
		return save_i387_soft( &tsk->thread.i387.soft,
				       (struct _fpstate __user *)buf );
	}
}

static inline int set_fpregs_fsave( struct task_struct *tsk,
				    struct user_i387_struct __user *buf )
{
	return __copy_from_user( &tsk->thread.i387.fsave, buf,
				 sizeof(struct user_i387_struct) );
}

static inline int set_fpregs_fxsave( struct task_struct *tsk,
				     struct user_i387_struct __user *buf )
{
	return convert_fxsr_from_user( &tsk->thread.i387.fxsave,
				       (struct _fpstate __user *)buf );
}

int set_fpregs( struct task_struct *tsk, struct user_i387_struct __user *buf )
{
	if ( HAVE_HWFP ) {
		if ( cpu_has_fxsr ) {
			return set_fpregs_fxsave( tsk, buf );
		} else {
			return set_fpregs_fsave( tsk, buf );
		}
	} else {
		return restore_i387_soft( &tsk->thread.i387.soft,
					  (struct _fpstate __user *)buf );
	}
}

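/*
 * Extended FXSR register access (PTRACE_GETFPXREGS/PTRACE_SETFPXREGS):
 * raw copies of the fxsave area, refused with -EIO on CPUs without FXSR.
 */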
int get_fpxregs( struct user_fxsr_struct __user *buf, struct task_struct *tsk )
{
	if ( cpu_has_fxsr ) {
		if (__copy_to_user( buf, &tsk->thread.i387.fxsave,
				    sizeof(struct user_fxsr_struct) ))
			return -EFAULT;
		return 0;
	} else {
		return -EIO;
	}
}

int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct __user *buf )
{
	int ret = 0;

	if ( cpu_has_fxsr ) {
		if (__copy_from_user( &tsk->thread.i387.fxsave, buf,
				      sizeof(struct user_fxsr_struct) ))
			ret = -EFAULT;
		/* mxcsr reserved bits must be masked to zero for security reasons */
		tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
	} else {
		ret = -EIO;
	}
	return ret;
}

/*
 * FPU state for core dumps.
 */

static inline void copy_fpu_fsave( struct task_struct *tsk,
				   struct user_i387_struct *fpu )
{
	memcpy( fpu, &tsk->thread.i387.fsave,
		sizeof(struct user_i387_struct) );
}

static inline void copy_fpu_fxsave( struct task_struct *tsk,
				    struct user_i387_struct *fpu )
{
	unsigned short *to;
	unsigned short *from;
	int i;

	memcpy( fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long) );

	to = (unsigned short *)&fpu->st_space[0];
	from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
	for ( i = 0 ; i < 8 ; i++, to += 5, from += 8 ) {
		memcpy( to, from, 5 * sizeof(unsigned short) );
	}
}

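/*
 * Fill in the classic user_i387_struct for a core dump of the current
 * task.  Returns non-zero only when the task has valid FPU state to
 * record.
 */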
int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
{
	int fpvalid;
	struct task_struct *tsk = current;

	fpvalid = !!used_math();
	if ( fpvalid ) {
		unlazy_fpu( tsk );
		if ( cpu_has_fxsr ) {
			copy_fpu_fxsave( tsk, fpu );
		} else {
			copy_fpu_fsave( tsk, fpu );
		}
	}

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

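/*
 * Same as dump_fpu() but for an arbitrary task; the live FPU registers
 * are flushed to memory only when the task is current.
 */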
int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
{
	int fpvalid = !!tsk_used_math(tsk);

	if (fpvalid) {
		if (tsk == current)
			unlazy_fpu(tsk);
		if (cpu_has_fxsr)
			copy_fpu_fxsave(tsk, fpu);
		else
			copy_fpu_fsave(tsk, fpu);
	}
	return fpvalid;
}

int dump_task_extended_fpu(struct task_struct *tsk, struct user_fxsr_struct *fpu)
{
	int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;

	if (fpvalid) {
		if (tsk == current)
			unlazy_fpu(tsk);
		memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(*fpu));
	}
	return fpvalid;
}