/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void kvm_tlb_flush_vmid(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

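/*
 * Helpers for the small cache of pre-allocated page-table pages.  Callers
 * top up the cache *before* taking mmu_lock, so that stage2_set_pte() can
 * pull second- and third-level tables from it without having to allocate
 * memory while holding the spinlock.
 */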
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

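/*
 * free_ptes - walk a Hyp-mode pmd range and free any level-3 (pte) tables
 * that have been allocated for it.
 */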
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			pte_free_kernel(NULL, pte);
		}
		pmd++;
	}
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and their child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr;

	mutex_lock(&kvm_hyp_pgd_mutex);
	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
		unsigned long hyp_addr = KERN_TO_HYP(addr);
		pgd = hyp_pgd + pgd_index(hyp_addr);
		pud = pud_offset(pgd, hyp_addr);

		if (pud_none(*pud))
			continue;
		BUG_ON(pud_bad(*pud));

		pmd = pmd_offset(pud, hyp_addr);
		free_ptes(pmd, addr);
		pmd_free(NULL, pmd);
		pud_clear(pud);
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end)
{
	pte_t *pte;
	unsigned long addr;
	struct page *page;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long hyp_addr = KERN_TO_HYP(addr);

		pte = pte_offset_kernel(pmd, hyp_addr);
		BUG_ON(!virt_addr_valid(addr));
		page = virt_to_page(addr);
		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
	}
}

static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
				       unsigned long end,
				       unsigned long *pfn_base)
{
	pte_t *pte;
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long hyp_addr = KERN_TO_HYP(addr);

		pte = pte_offset_kernel(pmd, hyp_addr);
		BUG_ON(pfn_valid(*pfn_base));
		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
		(*pfn_base)++;
	}
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long *pfn_base)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		unsigned long hyp_addr = KERN_TO_HYP(addr);
		pmd = pmd_offset(pud, hyp_addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, hyp_addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
		}

		next = pmd_addr_end(addr, end);

		/*
		 * If pfn_base is NULL, we map kernel pages into HYP with the
		 * virtual address. Otherwise, this is considered an I/O
		 * mapping and we map the physical region starting at
		 * *pfn_base to [start, end[.
		 */
		if (!pfn_base)
			create_hyp_pte_mappings(pmd, addr, next);
		else
			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
	}

	return 0;
}

static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	if (start >= end)
		return -EINVAL;
	/* Check for a valid kernel memory mapping */
	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
		return -EINVAL;
	/* Check for a valid kernel IO mapping */
	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
		return -EINVAL;

	mutex_lock(&kvm_hyp_pgd_mutex);
	for (addr = start; addr < end; addr = next) {
		unsigned long hyp_addr = KERN_TO_HYP(addr);
		pgd = hyp_pgd + pgd_index(hyp_addr);
		pud = pud_offset(pgd, hyp_addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, hyp_addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The range is mapped in Hyp mode at the same virtual address as in the
 * kernel (modulo HYP_PAGE_OFFSET), pointing to the same underlying
 * physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
	return __create_hyp_mappings(from, to, NULL);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
	unsigned long pfn = __phys_to_pfn(addr);
	return __create_hyp_mappings(from, to, &pfn);
}

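/*
 * Illustrative use of the two helpers above (a sketch, not code from this
 * file): callers map regular kernel memory by its kernel VA, and device
 * regions by their physical address.  The I/O example uses made-up names
 * (io_va, io_size, io_pa):
 *
 *	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 *	...
 *	err = create_hyp_io_mappings(io_va, io_va + io_size, io_pa);
 */
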
/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER
 * (it can cover either the full 40-bit input address range or be limited to
 * 32-bit input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	/* stage-2 pgd must be aligned to its size */
	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

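/*
 * Stage-2 table pages are reference counted: stage2_set_pte() takes a
 * reference on a table page for every entry it creates in it, and the
 * helpers below drop that reference when an entry is cleared.  A table
 * whose page_count() has fallen back to 1 (the allocation reference)
 * therefore contains no entries and can be freed.
 */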
static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}

/**
 * unmap_stage2_range - Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_addr_t addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = kvm->arch.pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

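/*
 * stage2_set_pte - install one entry in the VM's stage-2 page tables.
 *
 * Missing level-2/level-3 tables are allocated from @cache; when @cache is
 * NULL (the MMU notifier path) a missing table simply ends the walk.  A
 * reference is taken on each table page for every entry created in it, and
 * the VMID's TLB entries are flushed when an existing mapping is replaced.
 * For device mappings (@iomap) an already-present pte is treated as an error.
 */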
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid(kvm);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

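/*
 * user_mem_abort - handle a stage-2 fault on an address backed by a memslot.
 *
 * Pins the corresponding user page with gfn_to_pfn_prot(), then installs a
 * stage-2 mapping for it under mmu_lock, re-checking mmu_notifier_seq so a
 * concurrent host-side unmap cannot leave a stale mapping behind.
 */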
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that
 * the guest simply needs more memory and we must allocate an appropriate
 * page, or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and
 * whether this memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/* Adjust page offset */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK;
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

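/*
 * handle_hva_to_gpa - iterate over every guest physical page backing the
 * host virtual address range [start, end) and invoke @handler on it.  Used
 * by the MMU notifier callbacks below to translate host-side events into
 * stage-2 updates.
 */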
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

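/*
 * MMU notifier callbacks: when the host unmaps or changes a user mapping
 * that backs guest memory, tear down or update the corresponding stage-2
 * entries so the guest cannot keep using stale translations.
 */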
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid(kvm);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

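/*
 * kvm_mmu_get_httbr - return the physical address of the Hyp-mode page
 * tables, suitable for programming into HTTBR during Hyp initialization.
 */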
phys_addr_t kvm_mmu_get_httbr(void)
{
	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
	return virt_to_phys(hyp_pgd);
}

int kvm_mmu_init(void)
{
	if (!hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
	unsigned long addr, end;
	unsigned long next;
	pgd_t *pgd = hyp_pgd;
	pud_t *pud;
	pmd_t *pmd;

	addr = virt_to_phys(__hyp_idmap_text_start);
	end = virt_to_phys(__hyp_idmap_text_end);

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pud = pud_offset(pgd, addr);
		pmd = pmd_offset(pud, addr);

		pud_clear(pud);
		kvm_clean_pmd_entry(pmd);
		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
	} while (pgd++, addr = next, addr < end);
}