/*
	Asm versions of Xen pv-ops, suitable for either direct use or inlining.
	The inline versions are the same as the direct-use versions, with the
	pre- and post-amble chopped off.

	This code is encoded for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (ie, vcpu in pda) of the operations
	here; the indirect forms are better handled in C, since they're
	generally too large to inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>

#include <xen/interface/xen.h>

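/*
	ENDPATCH(x) marks the end of the code the pvop patching machinery
	may copy when it inlines one of the *_direct sequences below;
	RELOC(x, v) records the address of a displacement (such as the
	target of the call at label 2 in those sequences) that has to be
	fixed up after copying, or 0 if nothing needs relocating.
 */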
#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

#if 1
/*
	FIXME: x86_64 can now support direct access to percpu variables
	via a segment override.  Update xen accordingly.
 */
#define BUG		ud2a
#endif

/*
	Enable events.  This clears the event mask and tests the pending
	event status with a single 'and' operation.  If there are pending
	events, enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	BUG

	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/* Getting preempted here doesn't matter, because preemption will
	   deal with any pending interrupts itself.  The pending check may
	   end up being run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)

/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	BUG

	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be in X86_EFLAGS_IF, and other bits
	may be set in the return value.  We take advantage of this by
	making sure that X86_EFLAGS_IF has the right value (and other bits
	in that byte are 0), but other bits in the return value are
	undefined.  We need to toggle the state of the bit, because
	Xen and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	BUG

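	/* The mask byte is 0 when events are enabled; setz/addb below
	   turn that into 2 in %ah, which is X86_EFLAGS_IF (bit 9) in the
	   value returned in %rax. */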
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
	In principle the caller should be passing us a value returned
	from xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, we check for unmasked
	pending events and enter the hypervisor to get them delivered
	if so.
 */
ENTRY(xen_restore_fl_direct)
	BUG

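	/* Xen's event mask has the inverse sense of X86_EFLAGS_IF: the
	   setz below writes 1 (masked) exactly when the IF bit being
	   tested is clear in the flags we're restoring. */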
	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
Jeremy Fitzhardingecdacc122008-07-08 15:06:46 -0700110 /* Preempt here doesn't matter because that will deal with
111 any pending interrupts. The pending check may end up being
112 run on the wrong CPU, but that doesn't hurt. */
113
	/* check for unmasked and pending */
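	/* pending and mask are adjacent bytes in vcpu_info, so this one
	   word compare is true exactly when pending (low byte) == 1 and
	   mask (high byte) == 0. */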
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f		/* nothing unmasked and pending -> skip the call */
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
	Force an event check by making a hypercall,
	but preserve regs before making the call.
 */
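/*
	The *_direct sequences above call this without saving anything,
	so preserve every register that xen_force_evtchn_callback (an
	ordinary C function) is allowed to clobber.
 */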
check_events:
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret

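/*
	Xen pushes the outstanding %rcx and %r11 values on top of the
	usual exception frame.  Pull them back into their registers and
	drop them from the stack (ret $16 discards both words), so the
	frame looks to the common exception code like a native one.
 */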
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp),%rcx
	mov 8+8(%rsp),%r11
	ret $16

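/* Each hypercall has a 32-byte stub in the hypercall page, so
   hypercall N lives at hypercall_page + N*32. */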
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
	Xen64 iret frame:

	ss
	rsp
	rflags
	cs
	rip		<-- standard iret frame

	flags

	rcx		}
	r11		}<-- pushed by hypercall page
rsp ->	rax		}
 */
ENTRY(xen_iret)
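	/* flags word for the iret hypercall: 0, i.e. a plain return
	   rather than VGCF_in_syscall */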
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
	sysexit is not used for 64-bit processes, so it's
	only ever used to return to 32-bit compat userspace.
 */
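/*
	sysexit's convention has the new rip in %rdx and the new rsp in
	%rcx; together with the 32-bit user segments they make up the
	ss/rsp/rflags/cs/rip frame for the iret hypercall, followed by a
	zero flags word.
 */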
ENTRY(xen_sysexit)
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

ENTRY(xen_sysret64)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, %gs:pda_oldrsp
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS
	pushq %gs:pda_oldrsp
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
	Xen handles syscall callbacks much like ordinary exceptions,
	which means we have:
	 - kernel gs
	 - kernel rsp
	 - an iret-like stack frame on the stack (including rcx and r11):
		ss
		rsp
		rflags
		cs
		rip
		r11
	rsp->	rcx

	In all the entrypoints, we undo all that to make it look
	like a CPU-generated syscall/sysenter and jump to the normal
	entrypoint.
 */

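/* Offsets into the frame shown above: rcx at 0*8, r11 at 1*8, and the
   interrupted rsp at 5*8; reloading those three makes the register
   state look as if the CPU, not Xen, had taken the syscall. */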
.macro undo_xen_syscall
	mov 0*8(%rsp),%rcx
	mov 1*8(%rsp),%r11
	mov 5*8(%rsp),%rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

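/* Without IA32 emulation there is nothing to dispatch to: strip the
   extra rcx/r11 words, fail the call with -ENOSYS and return straight
   to userspace via the iret hypercall. */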
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */