/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

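/*
 * Handle the H_REGISTER_VPA hcall.  The subfunction code is carried
 * in bits 16:18 (big-endian numbering) of the flags argument, which
 * the shifts below extract: 1, 2 and 3 register the VPA, the
 * dispatch trace log (DTL) and the SLB shadow buffer respectively;
 * 5, 6 and 7 unregister them.  Registered areas are pinned with
 * kvmppc_pin_guest_page() so real-mode code can access them safely.
 */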
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err = H_PARAMETER;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	flags >>= 63 - 18;
	flags &= 7;
	if (flags == 0 || flags == 4)
		return H_PARAMETER;
	if (flags < 4) {
		if (vpa & 0x7f)
			return H_PARAMETER;
		if (flags >= 2 && !tvcpu->arch.vpa)
			return H_RESOURCE;
		/* registering new area; convert logical addr to real */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (flags <= 1)
			len = *(unsigned short *)(va + 4);
		else
			len = *(unsigned int *)(va + 4);
		if (len > nb)
			goto out_unpin;
		switch (flags) {
		case 1:		/* register VPA */
			if (len < 640)
				goto out_unpin;
			if (tvcpu->arch.vpa)
				kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
			tvcpu->arch.vpa = va;
			init_vpa(vcpu, va);
			break;
		case 2:		/* register DTL */
			if (len < 48)
				goto out_unpin;
			len -= len % 48;
			if (tvcpu->arch.dtl)
				kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
			tvcpu->arch.dtl = va;
			tvcpu->arch.dtl_end = va + len;
			break;
		case 3:		/* register SLB shadow buffer */
			if (len < 16)
				goto out_unpin;
			if (tvcpu->arch.slb_shadow)
				kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
			tvcpu->arch.slb_shadow = va;
			break;
		}
	} else {
		switch (flags) {
		case 5:		/* unregister VPA */
			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
				return H_RESOURCE;
			if (!tvcpu->arch.vpa)
				break;
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
			tvcpu->arch.vpa = NULL;
			break;
		case 6:		/* unregister DTL */
			if (!tvcpu->arch.dtl)
				break;
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
			tvcpu->arch.dtl = NULL;
			break;
		case 7:		/* unregister SLB shadow buffer */
			if (!tvcpu->arch.slb_shadow)
				break;
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
			tvcpu->arch.slb_shadow = NULL;
			break;
		}
	}
	return H_SUCCESS;

 out_unpin:
	kvmppc_unpin_guest_page(kvm, va);
	return err;
}

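/*
 * Handle the hcalls we emulate in the kernel.  The hcall token is in
 * GPR3 and its arguments in GPR4 and up; the return code is put back
 * in GPR3.  Anything not handled here returns RESUME_HOST so that it
 * is reflected to userspace as a KVM_EXIT_PAPR_HCALL exit.
 */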
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_ENTER:
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

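/*
 * Handle the trap that made the guest exit.  Returns RESUME_GUEST if
 * we can go straight back into the guest, or RESUME_HOST if the exit
 * has to be handled further up (possibly in userspace).
 */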
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				kvmppc_get_pc(vcpu), 0);
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* Clear the structure first so we don't wipe out the pvr below */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

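/*
 * ONE_REG interface.  HV-mode guests always run with HIOR = 0, so it
 * is the only value we report and the only value we accept.
 */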
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = put_user(0, (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
	{
		u64 hior;
		/* Only allow this to be set to zero */
		r = get_user(hior, (u64 __user *)reg->addr);
		if (!r && (hior != 0))
			r = -EINVAL;
		break;
	}
	default:
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.dtl)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
	if (vcpu->arch.slb_shadow)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
	if (vcpu->arch.vpa)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

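/*
 * Arm a hrtimer for the point at which the guest decrementer will
 * expire, converting timebase ticks to nanoseconds.  If the
 * decrementer has already gone negative, queue the interrupt now.
 */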
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		/ tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

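/*
 * Take a vcpu off its vcore's runnable list.  Physical thread IDs are
 * assigned densely, so each vcpu that follows this one in the list
 * has its ptid shifted down by one.
 */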
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}

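/*
 * Point the target hardware thread's PACA at this vcpu and its vcore.
 * For a secondary thread (nonzero ptid), also wake the thread out of
 * nap via xics_wake_cpu() so it picks the vcpu up and enters the
 * guest.
 */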
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		tpaca->cpu_start = 0x80;
		wmb();
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

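/*
 * Busy-wait until all the secondary threads we woke have gone back to
 * nap mode, i.e. until nap_count catches up with n_woken.  Warns and
 * gives up after about a million iterations rather than hanging the
 * primary thread forever.
 */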
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		kvmppc_start_thread(vcpu);

	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();
	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

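/*
 * Per-vcpu run loop.  Each vcpu's task puts itself on the vcore's
 * runnable list; one task then runs the whole core on behalf of all
 * of them via kvmppc_run_core(), while the other tasks sleep in
 * kvmppc_wait_for_exec() or, once every vcpu has ceded, in
 * kvmppc_vcore_blocked().
 */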
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}

	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
	}

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

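/*
 * Entry point for the KVM_RUN ioctl.  Sets up the (V)RMA on first
 * use, flushes FP/Altivec/VSX state out to the thread struct, then
 * loops running the vcpu and handling in-kernel hcalls until the
 * exit has to be delivered to userspace or a signal arrives.
 */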
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On the first time here, set up VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_rma(vcpu);
		if (r)
			return r;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		}
	} while (r == RESUME_GUEST);
	return r;
}

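/*
 * PAPR TCE (I/O translation) table support.  The table holds one u64
 * TCE entry per (1 << SPAPR_TCE_SHIFT)-byte page of the DMA window,
 * rounded up to whole pages, and is exposed to userspace by mmap of
 * an anonymous inode.
 */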
static long kvmppc_stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
	struct kvm *kvm = stt->kvm;
	int i;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	kfree(stt);
	mutex_unlock(&kvm->lock);

	kvm_put_kvm(kvm);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);
	return 0;
}

static struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};

long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);

	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap = kvm_rma_mmap,
	.release = kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	/* report the size before we possibly drop our reference to ri */
	ret->rma_size = ri->npages << PAGE_SHIFT;
	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0)
		kvm_release_rma(ri);

	return fd;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

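/*
 * Work out the SLB L/LP page-size encoding, as used in the VSID word
 * of an SLB entry, for a given base page size: 0 for 4k pages, L for
 * 16M pages, L|LP_01 for 64k pages.
 */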
static unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long npages;
	unsigned long *phys;

	/* Allocate a slot_phys array */
	phys = kvm->arch.slot_phys[mem->slot];
	if (!kvm->arch.using_mmu_notifiers && !phys) {
		npages = mem->memory_size >> PAGE_SHIFT;
		phys = vzalloc(npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		kvm->arch.slot_phys[mem->slot] = phys;
		kvm->arch.slot_npages[mem->slot] = npages;
	}

	return 0;
}

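/*
 * Release the pages pinned for a memory slot (only used when we are
 * not using MMU notifiers): mark each page dirty, drop our reference
 * to it, and free the slot_phys array itself.
 */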
static void unpin_slot(struct kvm *kvm, int slot_id)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = kvm->arch.slot_phys[slot_id];
	npages = kvm->arch.slot_npages[slot_id];
	if (physp) {
		spin_lock(&kvm->arch.slot_phys_lock);
		for (j = 0; j < npages; j++) {
			if (!(physp[j] & KVMPPC_GOT_PAGE))
				continue;
			pfn = physp[j] >> PAGE_SHIFT;
			page = pfn_to_page(pfn);
			if (PageHuge(page))
				page = compound_head(page);
			SetPageDirty(page);
			put_page(page);
		}
		kvm->arch.slot_phys[slot_id] = NULL;
		spin_unlock(&kvm->arch.slot_phys_lock);
		vfree(physp);
	}
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

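/*
 * One-time setup of the guest's real-mode area, performed on the
 * first KVM_RUN.  If the memory at guest physical address 0 comes
 * from one of our preallocated RMAs, program RMLS/RMOR to use it;
 * otherwise fall back to a virtual RMA (POWER7 only - PPC970 has no
 * VRMA mode).  kvm->lock serializes racing vcpus, and rma_setup_done
 * is set last, after a write barrier, so other vcpus see a fully
 * initialized LPCR etc.
 */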
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;		/* signed: lpcr_rmls() returns -1 on failure */
	unsigned long *physp;
	unsigned long i, npages;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Look up the memslot for guest physical address 0 */
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = kvm->arch.slot_phys[memslot->id];
		spin_lock(&kvm->arch.slot_phys_lock);
		for (i = 0; i < npages; ++i)
			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
		spin_unlock(&kvm->arch.slot_phys_lock);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out;
}

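/*
 * VM-level initialization: allocate the guest hashed page table and
 * work out the LPCR (or HID4 on PPC970, which serves the same role)
 * for this partition.  MMU notifiers are used only on POWER7
 * (CPU_FTR_ARCH_206); on PPC970 guest pages stay pinned and are
 * released by unpin_slot() at VM destruction.
 */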
int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	unsigned long i;

	if (!kvm->arch.using_mmu_notifiers)
		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
			unpin_slot(kvm, i);

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);