| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  *  linux/arch/i386/entry.S | 
 | 3 |  * | 
 | 4 |  *  Copyright (C) 1991, 1992  Linus Torvalds | 
 | 5 |  */ | 
 | 6 |  | 
 | 7 | /* | 
 | 8 |  * entry.S contains the system-call and fault low-level handling routines. | 
 | 9 |  * This also contains the timer-interrupt handler, as well as all interrupts | 
 | 10 |  * and faults that can result in a task-switch. | 
 | 11 |  * | 
 | 12 |  * NOTE: This code handles signal-recognition, which happens every time | 
 | 13 |  * after a timer-interrupt and after each system call. | 
 | 14 |  * | 
 | 15 |  * I changed all the .align's to 4 (16 byte alignment), as that's faster | 
 | 16 |  * on a 486. | 
 | 17 |  * | 
 | 18 |  * Stack layout in 'ret_from_system_call': | 
 | 19 |  * 	ptrace needs to have all regs on the stack. | 
 | 20 |  *	if the order here is changed, it needs to be | 
 | 21 |  *	updated in fork.c:copy_process, signal.c:do_signal, | 
 | 22 |  *	ptrace.c and ptrace.h | 
 | 23 |  * | 
 | 24 |  *	 0(%esp) - %ebx | 
 | 25 |  *	 4(%esp) - %ecx | 
 | 26 |  *	 8(%esp) - %edx | 
 | 27 |  *       C(%esp) - %esi | 
 | 28 |  *	10(%esp) - %edi | 
 | 29 |  *	14(%esp) - %ebp | 
 | 30 |  *	18(%esp) - %eax | 
 | 31 |  *	1C(%esp) - %ds | 
 | 32 |  *	20(%esp) - %es | 
 | 33 |  *	24(%esp) - orig_eax | 
 | 34 |  *	28(%esp) - %eip | 
 | 35 |  *	2C(%esp) - %cs | 
 | 36 |  *	30(%esp) - %eflags | 
 | 37 |  *	34(%esp) - %oldesp | 
 | 38 |  *	38(%esp) - %oldss | 
 | 39 |  * | 
 | 40 |  * "current" is in register %ebx during any slow entries. | 
 | 41 |  */ | 
 | 42 |  | 
 | 43 | #include <linux/config.h> | 
 | 44 | #include <linux/linkage.h> | 
 | 45 | #include <asm/thread_info.h> | 
 | 46 | #include <asm/errno.h> | 
 | 47 | #include <asm/segment.h> | 
 | 48 | #include <asm/smp.h> | 
 | 49 | #include <asm/page.h> | 
 | 50 | #include <asm/desc.h> | 
 | 51 | #include "irq_vectors.h" | 
 | 52 |  | 
/* Number of entries in sys_call_table; each entry is a 4-byte pointer. */
#define nr_syscalls ((syscall_table_size)/4)

/*
 * Byte offsets into the saved pt_regs frame on the kernel stack.
 * These must match the SAVE_ALL push order and the stack-layout
 * table in the header comment above.
 */
EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

/* EFLAGS bit masks. */
CF_MASK		= 0x00000001	# carry flag
TF_MASK		= 0x00000100	# trap flag (single-step)
IF_MASK		= 0x00000200	# interrupt enable flag
DF_MASK		= 0x00000400	# direction flag
NT_MASK		= 0x00004000	# nested task flag
VM_MASK		= 0x00020000	# virtual-8086 mode flag

#ifdef CONFIG_PREEMPT
/* With preemption, interrupts must stay disabled across the exit checks. */
#define preempt_stop		cli
#else
#define preempt_stop
/* Without preemption, a return to kernel mode needs no resched check. */
#define resume_kernel		restore_nocheck
#endif
 | 84 |  | 
/*
 * SAVE_ALL: complete the pt_regs frame after the hardware iret frame
 * and orig_eax have already been pushed.  The push order must match
 * the EBX..ES offsets defined above.  Loads the flat user data
 * selector into %ds/%es; clobbers %edx.
 */
#define SAVE_ALL \
	cld; \
	pushl %es; \
	pushl %ds; \
	pushl %eax; \
	pushl %ebp; \
	pushl %edi; \
	pushl %esi; \
	pushl %edx; \
	pushl %ecx; \
	pushl %ebx; \
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;

/* Pop the general-purpose registers in the reverse of SAVE_ALL order. */
#define RESTORE_INT_REGS \
	popl %ebx;	\
	popl %ecx;	\
	popl %edx;	\
	popl %esi;	\
	popl %edi;	\
	popl %ebp;	\
	popl %eax

/*
 * Restore all registers including %ds/%es.  Popping a stale user
 * segment selector can fault; the .fixup entries overwrite the bad
 * selector on the stack with the null selector and retry the pop,
 * via the __ex_table fault mappings below.
 */
#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
2:	popl %es;	\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous
 | 124 |  | 
 | 125 |  | 
/*
 * ret_from_fork: first code executed by a freshly created task.
 * %eax is pushed as the argument to schedule_tail (presumably the
 * previous task from the context switch — see copy_thread/switch_to)
 * and popped again before joining the normal syscall exit path.
 */
ENTRY(ret_from_fork)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)		# syscall_exit expects thread_info in %ebp
	popl %eax
	jmp syscall_exit
 | 132 |  | 
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop			# cli under CONFIG_PREEMPT, else no-op
ret_from_intr:
	GET_THREAD_INFO(%ebp)
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	testl $(VM_MASK | 3), %eax	# returning to VM86 or to CPL != 0?
	jz resume_kernel		# no: interrupted kernel code
ENTRY(resume_userspace)
 	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	cli
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck		# preemption disabled: plain return
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
	jz restore_all			# never preempt an irqs-off region
	call preempt_schedule_irq
	jmp need_resched		# re-check: more work may have arrived
#endif
 | 174 |  | 
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	# On entry %esp points into the TSS, so the real kernel stack
	# pointer (esp0) can be fetched from it.
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	sti
	# Manufacture the iret-style frame: user SS, user ESP (passed by
	# userspace in %ebp), EFLAGS, user CS, and the fixed return EIP.
	pushl $(__USER_DS)
	pushl %ebp
	pushfl
	pushl $(__USER_CS)
	pushl $SYSENTER_RETURN

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp	# pointer must lie in user space
	jae syscall_fault
1:	movl (%ebp),%ebp		# may fault; handled via __ex_table
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

	pushl %eax			# orig_eax = syscall number
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax	# bound-check the syscall number
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store return value in the frame
	cli				# no interrupts between the work
					# check and sysexit
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work		# slow path: tracing/signals/resched
/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx		# sysexit resumes at %edx
	movl OLDESP(%esp), %ecx		# with the user stack from %ecx
	xorl %ebp,%ebp
	sti
	sysexit
 | 222 |  | 
 | 223 |  | 
	# system call handler stub (int $0x80 path)
ENTRY(system_call)
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	# If the caller was single-stepping (TF set in the saved EFLAGS),
	# record it so the exit path delivers the pending trap.
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax	# bound-check the syscall number
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work
 | 249 |  | 
restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	# One combined test: VM86 flag clear, SS has the LDT (TI) bit
	# set (bit 2 of the selector, here in %ah) and CS has CPL 3.
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax
1:	iret
.section .fixup,"ax"
iret_exc:
	# iret itself faulted (bad user context); report it through the
	# common exception path as do_iret_error.
	sti
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

ldt_ss:
	larl OLDSS(%esp), %eax		# load access rights of the saved SS
	jnz restore_nocheck		# descriptor not readable: plain return
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# allright, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	subl $8, %esp		# reserve space for switch16 pointer
	cli
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	RESTORE_REGS
	lss 20+4(%esp), %esp	# switch to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
 | 300 |  | 
	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	# %cl still holds the low byte of TI_flags from the caller.
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched		# loop until need_resched clears

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax			# pt_regs pointer for do_notify_resume
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace		# re-sample work flags before the iret

	ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
	pushl %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	movl %eax, %esp			# switch to the saved 32bit frame
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace
#endif
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 339 |  | 
	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)		# default result if syscall is skipped
	movl %esp, %eax
	xorl %edx,%edx			# %edx = 0: entry-side trace
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax	# reload the (possibly rewritten) nr
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending			# no exit tracing, just pending work
	sti				# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx			# %edx = 1: exit-side trace
	call do_syscall_trace
	jmp resume_userspace

	ALIGN
syscall_fault:
	# Reached when the sysenter sixth-argument fetch faulted:
	# build a proper frame and return -EFAULT.
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

	ALIGN
syscall_badsys:
	movl $-ENOSYS,EAX(%esp)		# syscall number out of range
	jmp resume_userspace
 | 379 |  | 
/*
 * ESPFIX helpers for entries that arrive on the per-CPU 16bit stack
 * set up by the ldt_ss return path above.
 */
#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
/* Detect a 16bit (__ESPFIX_SS) stack and switch to the 32bit one. */
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 28f; \
	movl $__KERNEL_DS, %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK \
28:	popl %eax;
 | 400 |  | 
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.  Each stub pushes (vector - 256) — a negative
 * number, distinguishing hardware interrupts from syscalls/exceptions
 * in orig_eax — and the interrupt[] array in .data collects one
 * pointer per generated stub.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256		# orig_eax = vector - 256
	jmp common_interrupt
.data
	.long 1b			# record this stub in interrupt[]
.text
vector=vector+1
.endr

	ALIGN
common_interrupt:
	SAVE_ALL
	movl %esp,%eax			# pt_regs pointer
	call do_IRQ
	jmp ret_from_intr

/* Build an entry stub for a dedicated vector (SMP/APIC interrupts). */
#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
 | 438 |  | 
ENTRY(divide_error)
	pushl $0			# no error code
	pushl $do_divide_error
	ALIGN
error_code:
	# Common exception entry.  The handler address and error code
	# pushed before jumping here initially occupy the frame slots
	# that pt_regs uses for %es and orig_eax; they are read out and
	# the slots repaired below.
	pushl %ds
	pushl %eax
	xorl %eax, %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	decl %eax			# eax = -1
	pushl %ecx
	pushl %ebx
	cld
	pushl %es
	UNWIND_ESPFIX_STACK		# leave a possible 16bit stack
	popl %ecx			# %ecx = the %es value pushed above
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)	# orig_eax = -1: not a syscall
	movl %ecx, ES(%esp)		# put the real saved %es in place
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception

ENTRY(coprocessor_error)
	pushl $0			# no error code
	pushl $do_coprocessor_error
	jmp error_code

ENTRY(simd_coprocessor_error)
	pushl $0			# no error code
	pushl $do_simd_coprocessor_error
	jmp error_code

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	SAVE_ALL
	movl %cr0, %eax
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore		# real FPU: restore the FP context
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	call math_emulate
	addl $4, %esp			# drop the temporary slot
	jmp ret_from_exception
 | 493 |  | 
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp

KPROBE_ENTRY(debug)
	cmpl $sysenter_entry,(%esp)	# trapped at the sysenter insn?
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	.previous .text
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got  an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	pushl %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax		# NMI while on the 16bit stack?
	popl %eax
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)	# NMI hit the sysenter insn itself?
	je nmi_stack_fixup
	pushl %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax	# offset within the thread stack
	cmpl $(THREAD_SIZE-20),%eax	# enough room above us to look?
	popl %eax
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)	# NMI during the debug fixup path?
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	SAVE_ALL
	xorl %edx,%edx		# zero error code
	movl %esp,%eax		# pt_regs pointer
	call do_nmi
	jmp restore_all

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)		# return address inside the debug
	jb nmi_stack_correct		# handler's stack-fix window?
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_16bit_stack:
	/* create the pointer to lss back */
	pushl %ss
	pushl %esp
	movzwl %sp, %esp		# zero-extend the 16bit %sp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	.endr
	pushl %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
 | 598 |  | 
	# int3 (breakpoint) handler; kprobes enters through here,
	# hence KPROBE_ENTRY rather than ENTRY.
KPROBE_ENTRY(int3)
	pushl $-1			# mark this as an int
	SAVE_ALL
	xorl %edx,%edx		# zero error code
	movl %esp,%eax		# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	.previous .text
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 607 |  | 
	# Exception stubs.  Traps for which the CPU pushes no error code
	# push a dummy $0 first so the error_code frame layout is uniform.
ENTRY(overflow)
	pushl $0			# no error code
	pushl $do_overflow
	jmp error_code

ENTRY(bounds)
	pushl $0			# no error code
	pushl $do_bounds
	jmp error_code

ENTRY(invalid_op)
	pushl $0			# no error code
	pushl $do_invalid_op
	jmp error_code

ENTRY(coprocessor_segment_overrun)
	pushl $0			# no error code
	pushl $do_coprocessor_segment_overrun
	jmp error_code

	# For the following traps the CPU pushes an error code itself,
	# so only the handler address is pushed here.
ENTRY(invalid_TSS)
	pushl $do_invalid_TSS
	jmp error_code

ENTRY(segment_not_present)
	pushl $do_segment_not_present
	jmp error_code

ENTRY(stack_segment)
	pushl $do_stack_segment
	jmp error_code

KPROBE_ENTRY(general_protection)
	pushl $do_general_protection
	jmp error_code
	.previous .text

ENTRY(alignment_check)
	pushl $do_alignment_check
	jmp error_code

KPROBE_ENTRY(page_fault)
	pushl $do_page_fault
	jmp error_code
	.previous .text

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	pushl $0			# no error code
	pushl machine_check_vector	# indirect: vector chosen at boot
	jmp error_code
#endif

ENTRY(spurious_interrupt_bug)
	pushl $0			# no error code
	pushl $do_spurious_interrupt_bug
	jmp error_code
 | 665 |  | 
/*
 * The system call table is read-only data; syscall_table_size feeds
 * the nr_syscalls bound check at the top of this file.
 */
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)