/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

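/*
 * Per-vcpu event counters: each entry maps a file name to a counter in
 * the vcpu statistics structure; the common KVM code exposes these
 * through its debugfs statistics interface.
 */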
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	return 0;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

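/*
 * VM ioctls: KVM_S390_INTERRUPT copies a struct kvm_s390_interrupt from
 * userspace and injects it as a floating interrupt, i.e. one that is not
 * bound to a particular vcpu of this guest.
 */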
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

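/*
 * Create the architecture specific part of a virtual machine: make sure
 * the current process may run SIE (s390_enable_sie), allocate the system
 * control area (SCA) that later receives one entry per vcpu, register a
 * per-VM s390 debug feature and initialize the lock and list used for
 * floating interrupts.
 */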
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	try_module_get(THIS_MODULE);

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	debug_unregister(kvm->arch.dbf);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	kfree(kvm);
	module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but doesn't call it */
	BUG();
}

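/*
 * kvm_arch_vcpu_load/put implement the lazy register switch: on load the
 * host floating point and access registers are saved and the guest copies
 * are installed (with the guest FPC masked to its valid bits); on put the
 * guest registers are saved back and the host state is restored.
 */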
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

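/*
 * First-time setup of the SIE control block for a new vcpu: run the guest
 * in z/Architecture mode, let its memory window start at guest_origin and
 * end after guest_memsize plus the virtio descriptor space, and initialize
 * the timer used to wake up a vcpu that waits for its clock comparator.
 */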
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		    (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}

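/*
 * Allocate and wire up a new vcpu: the SIE control block gets its own
 * zeroed page, is entered into the SCA slot for this cpu id, and the
 * vcpu's local interrupt state is linked to the VM-wide floating
 * interrupt structure before the common kvm_vcpu_init() runs.
 */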
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	try_module_get(THIS_MODULE);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
	module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

extern void s390_handle_mcck(void);

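/*
 * One round trip through SIE for this vcpu: give up the cpu if a
 * reschedule or a machine check is pending, deliver any pending
 * interrupts into the guest, then run the guest context via sie64a(),
 * bracketed by kvm_guest_enter()/kvm_guest_exit().  A fault inside the
 * sie instruction is reported to the guest as an addressing exception.
 * Registers 14 and 15 are copied between guest_gprs and the gg14/gg15
 * fields of the SIE control block on the way in and out.
 */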
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

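/*
 * The KVM_RUN loop: take over the guest PSW that userspace may have
 * modified after a previous SIEIC exit, then alternate between running
 * the guest and handling SIE intercepts in the kernel until a handler
 * asks for userspace (-ENOTSUPP/-EREMOTE) or a signal is pending.  For
 * intercepts that cannot be handled in the kernel, kvm_run is filled
 * with the raw SIE intercept data so that userspace can take over.
 */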
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

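/*
 * Helper for the store-status code below: depending on the prefix flag,
 * copy either through the prefix-aware guest access path (copy_to_guest)
 * or to an absolute guest address (copy_to_guest_absolute).
 */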
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

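/*
 * vcpu ioctls: interrupt injection for this cpu (KVM_S390_INTERRUPT),
 * storing the cpu status into guest memory (KVM_S390_STORE_STATUS),
 * setting the initial PSW (KVM_S390_SET_INITIAL_PSW) and performing an
 * initial cpu reset (KVM_S390_INITIAL_RESET).
 */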
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions. But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);