/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
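
/*
 * Page-table page cache: callers top this up with mmu_topup_memory_cache()
 * before taking kvm->mmu_lock, so that stage2_set_pte() can pull
 * already-allocated pages out of it instead of sleeping in the allocator
 * while the lock is held.
 */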
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}
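
/*
 * The page refcount of a page-table page doubles as a count of its live
 * entries: every mapping takes a reference on the table it lives in, so
 * once page_count() drops back to one (the bare allocation reference),
 * the table contains no entries and can be freed.  pte_empty() and
 * pmd_empty() below rely on this.
 */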
static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}
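
/*
 * Walk the page tables under pgdp and clear every pte in [start, start+size).
 * Whenever clearing a pte empties its table, the table is freed and the
 * containing pmd/pud entry is cleared as well, so the walk can then skip
 * ahead by PMD_SIZE or PUD_SIZE.  Used for both the Hyp tables and the
 * stage-2 (guest) tables.
 */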
static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
	}

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		kfree(hyp_pgd);
	}

	kfree(init_bounce_page);
	mutex_unlock(&kvm_hyp_pgd_mutex);
}
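
/*
 * Fill in the pte level of a Hyp mapping: one pte per page in [start, end).
 * Each new pte takes a reference on its pte table, and is flushed to the
 * point of coherency so the tables can be walked while Hyp mode still runs
 * with its caches off during init.
 */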
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}
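
/*
 * Map [start, end) to the physical frames starting at pfn in the given
 * Hyp pgd, allocating intermediate tables on demand.  Serialized by
 * kvm_hyp_pgd_mutex, since both the boot and the runtime Hyp tables are
 * populated through this path.
 */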
static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the 1st level table, whose size is defined by
 * S2_PGD_ORDER (it can support either full 40-bit input addresses or be
 * limited to 32-bit input addresses).  Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	/* stage-2 pgd must be aligned to its size */
	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}
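
/*
 * Install *new_pte in the stage-2 tables of @kvm at @addr, allocating
 * missing intermediate tables from @cache.  When no cache is passed (the
 * MMU notifier paths), a missing table level simply means the address is
 * not mapped, so there is nothing to update.  For device (iomap) mappings
 * an already-present pte is unexpected and reported as -EFAULT instead of
 * being silently overwritten.
 */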
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}
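
/*
 * Handle a stage-2 fault on normal memory: pin the backing page with
 * gfn_to_pfn_prot() and install a stage-2 pte for it.  The mmu_notifier_seq
 * snapshot plus the mmu_notifier_retry() check under mmu_lock ensure that a
 * concurrent host-side unmap cannot race with the new mapping; if it does,
 * we drop the page and let the guest retry the access.
 */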
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't
	 * risk the page we just got a reference to getting unmapped before
	 * we have a chance to grab the mmu_lock, which ensures that if the
	 * page gets unmapped afterwards, the call to kvm_unmap_hva will
	 * take it away from us again properly.  This smp_rmb() interacts
	 * with the smp_wmb() in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that the
 * guest simply needs more memory and we must allocate an appropriate page,
 * or that the guest tried to access I/O memory, which is emulated by user
 * space.  The distinction is based on the IPA causing the fault and whether
 * this memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check that the stage-2 fault is a translation or a permission fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFSC=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}
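
/*
 * Translate a range of host virtual addresses into the guest physical
 * pages it backs and call @handler on each one.  A single HVA range may
 * show up in several memslots, so every intersecting slot is walked.
 */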
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}
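
/*
 * MMU notifier callbacks, invoked by the core MM when the host mapping of
 * guest memory changes: unmapped host pages get their stage-2 mapping torn
 * down and the TLB entry invalidated, while a changed host pte is written
 * straight into the stage-2 tables.  The set_spte path passes a NULL cache
 * to stage2_set_pte(), so it only touches table levels that already exist.
 */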
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
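
/*
 * Accessors for the addresses the init code programs into the hardware:
 * the runtime and boot Hyp translation table bases (HTTBR), and the
 * physical address of the Hyp init vectors.
 */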
phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}
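
/**
 * kvm_mmu_init - set up the Hyp-mode page tables
 *
 * Builds two sets of Hyp tables: a boot pgd mapping only the idmap'ed init
 * code (bounced to a single page if it straddles a page boundary) plus its
 * alias at TRAMPOLINE_VA, and the runtime pgd, which also maps the
 * trampoline page so that the switch from the boot tables to the runtime
 * tables can happen while executing out of a page mapped in both.
 */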
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);

	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}