x86: get rid of duplicate code in case of CONFIG_VM86
No need to have the call to do_notify_resume() and the checks around it
duplicated for the vm86 case - a bit of rearranging of the ifdefs and we'll
have a perfectly fine copy to jump back to.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
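
For reference, a sketch (not part of the patch) of how the tail of
work_pending reads once both hunks below are applied: the six lines between
the hunks (TRACE_IRQS_ON through jb resume_kernel) do not appear as context
in the second hunk and are inferred from the duplicate removed in the first
one, the opening #ifdef and the vm86 test above the first context line are
abbreviated, and the comment on "jmp 1b" is added here for illustration.

#ifdef CONFIG_VM86
	...				# vm86 test, not shown in this diff
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
1:
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb PT_CS(%esp), %bl
	andb $SEGMENT_RPL_MASK, %bl
	cmpb $USER_RPL, %bl
	jb resume_kernel
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace

#ifdef CONFIG_VM86
	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
	jmp 1b				# rejoin the common do_notify_resume path
#endif
END(work_pending)

The vm86 path is thus reduced to its save_v86_state() call plus a backward
jump ("1b" is the GNU as local-label reference to the "1:" above), so the
TRACE_IRQS_ON through do_notify_resume sequence exists in exactly one copy.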
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index b6bb692..fe4cc30 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -618,22 +618,7 @@
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- movb PT_CS(%esp), %bl
- andb $SEGMENT_RPL_MASK, %bl
- cmpb $USER_RPL, %bl
- jb resume_kernel
- xorl %edx, %edx
- call do_notify_resume
- jmp resume_userspace
-
- ALIGN
-work_notifysig_v86:
- pushl_cfi %ecx # save ti_flags for do_notify_resume
- call save_v86_state # %eax contains pt_regs pointer
- popl_cfi %ecx
- movl %eax, %esp
+1:
#else
movl %esp, %eax
#endif
@@ -646,6 +631,16 @@
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
+
+#ifdef CONFIG_VM86
+ ALIGN
+work_notifysig_v86:
+ pushl_cfi %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl_cfi %ecx
+ movl %eax, %esp
+ jmp 1b
+#endif
END(work_pending)

# perform syscall exit tracing