/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

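/*
 * Record the vcpu currently running on this physical CPU; callers must be
 * in a non-preemptible context (enforced by the BUG_ON below).
 */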
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
	BUG_ON(preemptible());
	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
}

/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
	BUG_ON(preemptible());
	return __get_cpu_var(kvm_arm_running_vcpu);
}

/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
{
	return &kvm_arm_running_vcpu;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = kvm_arch_dev_ioctl_check_extension(ext);
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;

	/* Set up VGIC */
	ret = kvm_vgic_vcpu_init(vcpu);
	if (ret)
		return ret;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	/*
	 * Check whether this vcpu requires the cache to be flushed on
	 * this physical CPU. This is a consequence of doing dcache
	 * operations by set/way on this vcpu. We do it here to be in
	 * a non-preemptible section.
	 */
	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

	kvm_arm_set_running_vcpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arm_set_running_vcpu(NULL);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM's VMID to check
 *
 * Returns true if there is a new generation of VMIDs being used.
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a
 * VMID for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm:	The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
	kvm->arch.vttbr |= vmid;

	spin_unlock(&kvm_vmid_lock);
}

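/*
 * One-time setup deferred until the vcpu actually runs: initialize the
 * in-kernel VGIC for this VM if needed and honour a "start in power-off"
 * request through the PSCI code.
 */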
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;

	/*
	 * Initialize the VGIC before running a vcpu the first time on
	 * this VM.
	 */
	if (irqchip_in_kernel(vcpu->kvm) &&
	    unlikely(!vgic_initialized(vcpu->kvm))) {
		int ret = kvm_vgic_init(vcpu->kvm);
		if (ret)
			return ret;
	}

	/*
	 * Handle the "start in power-off" case by calling into the
	 * PSCI code.
	 */
	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
		kvm_psci_call(vcpu);
	}

	return 0;
}

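/* Block on the vcpu's wait queue until the pause flag is cleared. */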
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

	wait_event_interruptible(*wq, !vcpu->arch.pause);
}

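/* A vcpu has a valid target only after userspace has issued KVM_ARM_VCPU_INIT. */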
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the KVM_RUN ioctl from user space. It
 * executes VM code in a loop until the time slice for the process is used up
 * or some emulation is needed from user space, in which case the function
 * returns 0 with the kvm_run structure filled in with the required data for
 * the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		if (vcpu->arch.pause)
			vcpu_pause(vcpu);

		kvm_vgic_flush_hwstate(vcpu);
		kvm_timer_flush_hwstate(vcpu);

		local_irq_disable();

		/*
		 * Re-check atomic conditions
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
			local_irq_enable();
			kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			continue;
		}

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		kvm_guest_enter();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->arch.last_pcpu = smp_processor_id();
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * Back from guest
		 *************************************************************/

		kvm_timer_sync_hwstate(vcpu);
		kvm_vgic_sync_hwstate(vcpu);

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}

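/*
 * Assert or clear a core IRQ/FIQ line by updating the HCR_VI/HCR_VF image
 * in vcpu->arch.irq_lines, kicking the vcpu only if the line state changed.
 */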
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}

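/*
 * KVM_IRQ_LINE handler: decode the irq field into type, vcpu index and
 * interrupt number, then route it to the core IRQ/FIQ lines or to the
 * in-kernel VGIC as appropriate.
 */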
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS ||
		    irq_num > KVM_ARM_IRQ_GIC_MAX)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
	}

	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);

	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
	default:
		return -ENODEV;
	}
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		if (vgic_present)
			return kvm_vgic_create(kvm);
		else
			return -ENXIO;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	default:
		return -EINVAL;
	}
}

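/*
 * Per-CPU Hyp initialization: replace the Hyp stub vectors with the KVM
 * init vector and hand the Hyp page tables, stack and vector base over to
 * __cpu_init_hyp_mode.
 */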
static void cpu_init_hyp_mode(void *dummy)
{
	unsigned long long boot_pgd_ptr;
	unsigned long long pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr();
	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
}

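/*
 * CPU notifier: a CPU coming online needs the same per-CPU Hyp setup as
 * the CPUs initialized at boot time.
 */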
static int hyp_init_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		cpu_init_hyp_mode(NULL);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hyp_init_cpu_nb = {
	.notifier_call = hyp_init_cpu_notify,
};

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host CPU structures
	 */
	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
	if (!kvm_host_cpu_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host CPU state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		kvm_cpu_context_t *cpu_ctxt;

		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);

		if (err) {
			kvm_err("Cannot map host CPU state: %d\n", err);
			goto out_free_context;
		}
	}

	/*
	 * Execute the init code on each CPU.
	 */
	on_each_cpu(cpu_init_hyp_mode, NULL, 1);

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	if (err)
		goto out_free_context;

#ifdef CONFIG_KVM_ARM_VGIC
	vgic_present = true;
#endif

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init();
	if (err)
		goto out_free_mappings;

#ifndef CONFIG_HOTPLUG_CPU
	free_boot_hyp_pgd();
#endif

	kvm_perf_init();

	kvm_info("Hyp mode initialized successfully\n");

	return 0;
out_free_context:
	free_percpu(kvm_host_cpu_state);
out_free_mappings:
	free_hyp_pgds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

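/* Run on each CPU to report whether kvm_target_cpu() recognizes it. */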
static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	err = register_cpu_notifier(&hyp_init_cpu_nb);
	if (err) {
		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
		goto out_err;
	}

	kvm_coproc_table_init();
	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

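/* Register this architecture backend with the generic KVM core. */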
static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);