/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
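
/*
 * Illustrative sketch (kept out of the build) of the pfn<->mfn
 * translation described above.  The real conversions, with full flag
 * and error handling, are pte_pfn_to_mfn()/pte_mfn_to_pfn() below;
 * the example_ name here is purely hypothetical.
 */
#if 0
static pteval_t example_pte_pfn_to_mfn(pteval_t val)
{
	unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
	pteval_t flags = val & PTE_FLAGS_MASK;

	/* look up the machine frame backing this domain-local pfn */
	return ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
}
#endif
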
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to map the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
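
/*
 * Illustrative sketch (not built): per the note above, a vcpu that
 * wants to know whether *another* vcpu is still using a given
 * pagetable must compare against xen_current_cr3, never xen_cr3,
 * since the latter may still be sitting in an unflushed lazy batch.
 * xen_drop_mm_ref() below does exactly this; the example_ name is
 * hypothetical.
 */
#if 0
static bool example_vcpu_uses_pgd(unsigned cpu, pgd_t *pgd)
{
	return per_cpu(xen_current_cr3, cpu) == __pa(pgd);
}
#endif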

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the vaddr is in the linearly mapped range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/* room in the pending mmu_update multicall: extend it */
		mcs.mc->args[1]++;
	} else {
		/* no pending multicall to extend: start a new one */
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

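/*
 * Illustrative sketch (not built): how the extend helpers above let
 * several updates ride in a single mmu_update hypercall.  Both writes
 * below are folded into one multicall entry when the second
 * xen_extend_mmu_update() finds room in the pending call.  The
 * example_ function is hypothetical.
 */
#if 0
static void example_set_two_ptes(pte_t *ptepa, pte_t vala,
				 pte_t *ptepb, pte_t valb)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptepa).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(vala);
	xen_extend_mmu_update(&u);

	u.ptr = virt_to_machine(ptepb).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(valb);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif
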
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write, but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

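/*
 * Illustrative sketch (not built): the start/commit pair above is
 * driven by callers roughly as shown, so that any A/D bits set by the
 * hypervisor between the read and the write are preserved via
 * MMU_PT_UPDATE_PRESERVE_AD.  The example_ function is hypothetical.
 */
#if 0
static void example_wrprotect_one(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep)
{
	pte_t pte = xen_ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_wrprotect(pte);	/* modify the protections */
	xen_ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif
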
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags       Linux  Xen  Default
 * 0                    WB     WB   WB
 * 1            PWT     WC     WT   WT
 * 2        PCD         UC-    UC-  UC-
 * 3        PCD PWT     UC     UC   UC
 * 4    PAT             WB     WC   WB
 * 5    PAT     PWT     WC     WP   WT
 * 6    PAT PCD         UC-    UC   UC-
 * 7    PAT PCD PWT     UC     UC   UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAP pings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

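/*
 * Illustrative sketch (not built): a minimal walker callback.  The
 * real callbacks (xen_pin_page, xen_unpin_page, xen_mark_pinned)
 * follow below; the return value tells the caller whether a TLB
 * flush is needed.  The example_ function is hypothetical.
 */
#if 0
static int example_log_pt_page(struct mm_struct *mm, struct page *page,
			       enum pt_level level)
{
	if (level == PT_PTE)
		pr_debug("pte page at pfn %lx\n", page_to_pfn(page));
	return 0;		/* no flush required */
}
/* usage: xen_pgd_walk(mm, example_log_pt_page, STACK_TOP_MAX); */
#endif
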
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO, to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

Attilio Rao7737b212012-08-21 21:22:38 +01001177static void __init xen_pagetable_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001178{
Attilio Rao843b8ed2012-08-21 21:22:39 +01001179 paging_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001180}
1181
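/*
 * A sketch of the contract here: the early pagetable allocator may
 * have set aside (and mapped RO) more pages than it actually used.
 * After the native reservation of [start, end), walk the unused tail
 * up to pgt_buf_top and flip it back to RW so it can be reused as
 * ordinary memory.
 */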
static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
        /* reserve the range used */
        native_pagetable_reserve(start, end);

        /* set as RW the rest */
        printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
               PFN_PHYS(pgt_buf_top));
        while (end < PFN_PHYS(pgt_buf_top)) {
                make_lowmem_page_readwrite(__va(end));
                end += PAGE_SIZE;
        }
}

static void xen_post_allocator_init(void);

static void __init xen_pagetable_setup_done(pgd_t *base)
{
        xen_setup_shared_info();
        xen_post_allocator_init();
}

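/*
 * cr2 (the page-fault address) is virtualized: the hypervisor leaves a
 * copy in the vcpu info rather than letting the guest read the real
 * register.  xen_read_cr2_direct() goes through the fixed per-cpu
 * xen_vcpu_info copy, presumably so the critical exception path can
 * avoid chasing the xen_vcpu pointer.
 */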
static void xen_write_cr2(unsigned long cr2)
{
        this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
        return this_cpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
        return this_cpu_read(xen_vcpu_info.arch.cr2);
}

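/*
 * TLB flushes are issued as MMUEXT hypercalls.  Going through
 * xen_mc_entry()/xen_mc_issue() means that, while in lazy MMU mode,
 * the flush can be batched with other pending updates into a single
 * multicall instead of trapping into the hypervisor immediately.
 */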
static void xen_flush_tlb(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb(0);

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb_single(addr);

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_INVLPG_LOCAL;
        op->arg1.linear_addr = addr & PAGE_MASK;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

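/*
 * Remote shootdown without IPIs: the target cpumask is handed to the
 * hypervisor inside the mmuext op itself, so one (multi)call flushes
 * all remote vcpus.  A range that fits in a single page is narrowed
 * to INVLPG_MULTI; anything bigger is a full TLB flush on the targets.
 */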
static void xen_flush_tlb_others(const struct cpumask *cpus,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long end)
{
        struct {
                struct mmuext_op op;
#ifdef CONFIG_SMP
                DECLARE_BITMAP(mask, num_processors);
#else
                DECLARE_BITMAP(mask, NR_CPUS);
#endif
        } *args;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);

        if (cpumask_empty(cpus))
                return;         /* nothing to do */

        mcs = xen_mc_entry(sizeof(*args));
        args = mcs.args;
        args->op.arg2.vcpumask = to_cpumask(args->mask);

        /* Remove us, and any offline CPUS. */
        cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

        args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
                args->op.cmd = MMUEXT_INVLPG_MULTI;
                args->op.arg1.linear_addr = start;
        }

        MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
        return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
        this_cpu_write(xen_current_cr3, (unsigned long)v);
}

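/*
 * 64-bit guests have two pagetable bases: the kernel one
 * (MMUEXT_NEW_BASEPTR) and the user one (MMUEXT_NEW_USER_BASEPTR).
 * This helper only queues the op on the current batch, so callers are
 * expected to bracket it with xen_mc_batch()/xen_mc_issue().
 */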
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
        struct mmuext_op op;
        unsigned long mfn;

        trace_xen_mmu_write_cr3(kernel, cr3);

        if (cr3)
                mfn = pfn_to_mfn(PFN_DOWN(cr3));
        else
                mfn = 0;

        WARN_ON(mfn == 0 && kernel);

        op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = mfn;

        xen_extend_mmuext_op(&op);

        if (kernel) {
                this_cpu_write(xen_cr3, cr3);

                /* Update xen_current_cr3 once the batch has actually
                   been submitted. */
                xen_mc_callback(set_current_cr3, (void *)cr3);
        }
}

static void xen_write_cr3(unsigned long cr3)
{
        BUG_ON(preemptible());

        xen_mc_batch();         /* disables interrupts */

        /* Update while interrupts are disabled, so it's atomic with
           respect to ipis */
        this_cpu_write(xen_cr3, cr3);

        __xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
        {
                pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
                if (user_pgd)
                        __xen_write_cr3(false, __pa(user_pgd));
                else
                        __xen_write_cr3(false, 0);
        }
#endif

        xen_mc_issue(PARAVIRT_LAZY_CPU);        /* interrupts restored */
}

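/*
 * On 64-bit, each pgd also gets a shadow "user" pgd, allocated here
 * and stashed in the pgd page's ->private field (which appears to be
 * where xen_get_user_pgd() finds it again).  Only the vsyscall slot
 * is pre-populated.
 */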
static int xen_pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = mm->pgd;
        int ret = 0;

        BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
        {
                struct page *page = virt_to_page(pgd);
                pgd_t *user_pgd;

                BUG_ON(page->private != 0);

                ret = -ENOMEM;

                user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                page->private = (unsigned long)user_pgd;

                if (user_pgd != NULL) {
                        user_pgd[pgd_index(VSYSCALL_START)] =
                                __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
                        ret = 0;
                }

                BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
        }
#endif

        return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
        pgd_t *user_pgd = xen_get_user_pgd(pgd);

        if (user_pgd)
                free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
        if (pte_val_ma(*ptep) & _PAGE_PRESENT)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));

        return pte;
}
#else /* CONFIG_X86_64 */
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        /*
         * If the new pfn is within the range of the newly allocated
         * kernel pagetable, and it isn't being mapped into an
         * early_ioremap fixmap slot as a freshly allocated page, make
         * sure it is RO.
         */
        if (((!is_early_ioremap_ptep(ptep) &&
              pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
            (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
                pte = pte_wrprotect(pte);

        return pte;
}
#endif /* CONFIG_X86_64 */

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive. At
 * this point it is not possible to tell if a page is pinned or not,
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
        if (pte_mfn(pte) != INVALID_P2M_ENTRY)
                pte = mask_rw_pte(ptep, pte);
        else
                pte = __pte_ma(0);

        native_set_pte(ptep, pte);
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
        struct mmuext_op op;
        op.cmd = cmd;
        op.arg1.mfn = pfn_to_mfn(pfn);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);        /* should only be used early */
#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
        pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);        /* should only be used early */
#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

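/*
 * Multicall variants of pin_pagetable_pfn() and set_page_prot(): the
 * leading __ signals that they only queue work on the current batch,
 * so the caller owns the surrounding xen_mc_batch()/xen_mc_issue().
 */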
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
        struct multicall_space mcs;
        struct mmuext_op *op;

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = cmd;
        op->arg1.mfn = pfn_to_mfn(pfn);

        MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
        struct multicall_space mcs;
        unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

        mcs = __xen_mc_entry(0);
        MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
                                pfn_pte(pfn, prot), 0);
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
                                    unsigned level)
{
        bool pinned = PagePinned(virt_to_page(mm->pgd));

        trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

        if (pinned) {
                struct page *page = pfn_to_page(pfn);

                SetPagePinned(page);

                if (!PageHighMem(page)) {
                        xen_mc_batch();

                        __set_pfn_prot(pfn, PAGE_KERNEL_RO);

                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
                                __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                } else {
                        /* make sure there are no stray mappings of
                           this page */
                        kmap_flush_unused();
                }
        }
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
        struct page *page = pfn_to_page(pfn);
        bool pinned = PagePinned(page);

        trace_xen_mmu_release_ptpage(pfn, level, pinned);

        if (pinned) {
                if (!PageHighMem(page)) {
                        xen_mc_batch();

                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
                                __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

                        __set_pfn_prot(pfn, PAGE_KERNEL);

                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                }
                ClearPagePinned(page);
        }
}

static void xen_release_pte(unsigned long pfn)
{
        xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
        xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
        xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
        unsigned long top = HYPERVISOR_VIRT_START;
        struct xen_platform_parameters pp;

        if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
                top = pp.virt_start;

        reserve_top_address(-top);
#endif  /* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
        return (void *)(paddr + __START_KERNEL_map);
#else
        return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
        phys_addr_t paddr;

        maddr &= PTE_PFN_MASK;
        paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

        return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
        return __ka(m2p(maddr));
}

/* Set the page permissions on identity-mapped pages */
static void set_page_prot(void *addr, pgprot_t prot)
{
        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
        pte_t pte = pfn_pte(pfn, prot);

        if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
                BUG();
}

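/*
 * Build the early identity mapping (pfn N at virtual slot N) under
 * 'pmd': reuse any pte pages the hypervisor already provided, carve
 * the rest out of the brk-allocated level1_ident_pgt, and finish by
 * flipping every pte page used (and the pmd itself) to RO via
 * set_page_prot().
 */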
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
        unsigned pmdidx, pteidx;
        unsigned ident_pte;
        unsigned long pfn;

        level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
                                      PAGE_SIZE);

        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
                pte_t *pte_page;

                /* Reuse or allocate a page of ptes */
                if (pmd_present(pmd[pmdidx]))
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
                        if (ident_pte == LEVEL1_IDENT_ENTRIES)
                                break;

                        pte_page = &level1_ident_pgt[ident_pte];
                        ident_pte += PTRS_PER_PTE;

                        pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
                }

                /* Install mappings */
                for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                        pte_t pte;

#ifdef CONFIG_X86_32
                        if (pfn > max_pfn_mapped)
                                max_pfn_mapped = pfn;
#endif

                        if (!pte_none(pte_page[pteidx]))
                                continue;

                        pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
                        pte_page[pteidx] = pte;
                }
        }

        for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
                set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

        set_page_prot(pmd, PAGE_KERNEL_RO);
}

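/*
 * The machine-to-physical table is provided ready-made by the
 * hypervisor; ask where it lives, and fall back to the compile-time
 * default if the XENMEM_machphys_mapping query isn't supported.  The
 * 32-bit WARN_ON looks like a guard against the table wrapping around
 * the top of the address space.
 */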
void __init xen_setup_machphys_mapping(void)
{
        struct xen_machphys_mapping mapping;

        if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
                machine_to_phys_mapping = (unsigned long *)mapping.v_start;
                machine_to_phys_nr = mapping.max_mfn + 1;
        } else {
                machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
        }
#ifdef CONFIG_X86_32
        WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
                < machine_to_phys_mapping);
#endif
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
        pte_t *pte = v;
        int i;

        /* All levels are converted the same way, so just treat them
           as ptes. */
        for (i = 0; i < PTRS_PER_PTE; i++)
                pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working. We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                                          unsigned long max_pfn)
{
        pud_t *l3;
        pmd_t *l2;

        /* max_pfn_mapped is the last pfn mapped in the initial memory
         * mappings. On Xen the mappings after the kernel cover some
         * pages that don't exist in pfn space, so set max_pfn_mapped
         * to the last real pfn mapped. */
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

        /* Zap identity mapping */
        init_level4_pgt[0] = __pgd(0);

        /* Pre-constructed entries are in pfn, so convert to mfn */
        convert_pfn_mfn(init_level4_pgt);
        convert_pfn_mfn(level3_ident_pgt);
        convert_pfn_mfn(level3_kernel_pgt);

        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

        memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
        memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

        l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
        memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

        /* Set up identity map */
        xen_map_identity_early(level2_ident_pgt, max_pfn);

        /* Make pagetable pieces RO */
        set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

        /* Pin down new L4 */
        pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
                          PFN_DOWN(__pa_symbol(init_level4_pgt)));

        /* Unpin Xen-provided one */
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        /* Switch over */
        pgd = init_level4_pgt;

        /*
         * At this stage there can be no user pgd, and no page
         * structure to attach it to, so make sure we just set kernel
         * pgd.
         */
        xen_mc_batch();
        __xen_write_cr3(true, __pa(pgd));
        xen_mc_issue(PARAVIRT_LAZY_CPU);

        memblock_reserve(__pa(xen_start_info->pt_base),
                         xen_start_info->nr_pt_frames * PAGE_SIZE);

        return pgd;
}
#else   /* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static void __init xen_write_cr3_init(unsigned long cr3)
{
        unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

        BUG_ON(read_cr3() != __pa(initial_page_table));
        BUG_ON(cr3 != __pa(swapper_pg_dir));

        /*
         * We are switching to swapper_pg_dir for the first time (from
         * initial_page_table) and therefore need to mark that page
         * read-only and then pin it.
         *
         * Xen disallows sharing of kernel PMDs for PAE
         * guests. Therefore we must copy the kernel PMD from
         * initial_page_table into a new kernel PMD to be used in
         * swapper_pg_dir.
         */
        swapper_kernel_pmd =
                extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
        memcpy(swapper_kernel_pmd, initial_kernel_pmd,
               sizeof(pmd_t) * PTRS_PER_PMD);
        swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
                __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
        set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

        set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
        xen_write_cr3(cr3);
        pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
                          PFN_DOWN(__pa(initial_page_table)));
        set_page_prot(initial_page_table, PAGE_KERNEL);
        set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

        pv_mmu_ops.write_cr3 = &xen_write_cr3;
}

pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                                          unsigned long max_pfn)
{
        pmd_t *kernel_pmd;

        initial_kernel_pmd =
                extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
                                  512*1024);

        kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
        memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

        xen_map_identity_early(initial_kernel_pmd, max_pfn);

        memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
        initial_page_table[KERNEL_PGD_BOUNDARY] =
                __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

        set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
        set_page_prot(initial_page_table, PAGE_KERNEL_RO);
        set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
                          PFN_DOWN(__pa(initial_page_table)));
        xen_write_cr3(__pa(initial_page_table));

        memblock_reserve(__pa(xen_start_info->pt_base),
                         xen_start_info->nr_pt_frames * PAGE_SIZE);

        return initial_page_table;
}
#endif  /* CONFIG_X86_64 */

static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

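/*
 * Fixmap slots fall into three buckets here: ordinary RAM-backed
 * slots, which keep a pfn-based pte; the APIC slots, which are aimed
 * at the dummy page because all real access goes via hypercalls; and
 * genuine hardware mappings, which need an mfn-based pte with
 * _PAGE_IOMAP set (the default case).
 */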
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
        pte_t pte;

        phys >>= PAGE_SHIFT;

        switch (idx) {
        case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
        case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
        case FIX_WP_TEST:
        case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
        case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
        case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
        case VVAR_PAGE:
#endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
                /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;

#ifdef CONFIG_X86_LOCAL_APIC
        case FIX_APIC_BASE:     /* maps dummy local APIC */
                pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
                break;
#endif

#ifdef CONFIG_X86_IO_APIC
        case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
                /*
                 * We just don't map the IO APIC - all access is via
                 * hypercalls. Keep the address in the pte for reference.
                 */
                pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
                break;
#endif

        case FIX_PARAVIRT_BOOTMAP:
                /* This is an MFN, but it isn't an IO mapping from the
                   IO domain */
                pte = mfn_pte(phys, prot);
                break;

        default:
                /* By default, set_fixmap is used for hardware mappings */
                pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
                break;
        }

        __native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
        /* Replicate changes to map the vsyscall page into the user
           pagetable vsyscall mapping. */
        if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
            idx == VVAR_PAGE) {
                unsigned long vaddr = __fix_to_virt(idx);
                set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
        }
#endif
}

static void __init xen_post_allocator_init(void)
{
        pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
        pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
        pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

        /* This will work as long as patching hasn't happened yet
           (which it hasn't) */
        pv_mmu_ops.alloc_pte = xen_alloc_pte;
        pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
        pv_mmu_ops.release_pte = xen_release_pte;
        pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
        pv_mmu_ops.alloc_pud = xen_alloc_pud;
        pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
        SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
        xen_mark_init_mm_pinned();
}

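/*
 * Leaving lazy MMU mode must flush whatever is still batched, so the
 * hypervisor observes every queued update before normal, synchronous
 * operation resumes.  Preemption is disabled so the flush runs against
 * this CPU's multicall buffer.
 */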
static void xen_leave_lazy_mmu(void)
{
        preempt_disable();
        xen_mc_flush();
        paravirt_leave_lazy_mmu();
        preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .read_cr2 = xen_read_cr2,
        .write_cr2 = xen_write_cr2,

        .read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
        .write_cr3 = xen_write_cr3_init,
#else
        .write_cr3 = xen_write_cr3,
#endif

        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_single = xen_flush_tlb_single,
        .flush_tlb_others = xen_flush_tlb_others,

        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,

        .pgd_alloc = xen_pgd_alloc,
        .pgd_free = xen_pgd_free,

        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pmd_init,
        .release_pmd = xen_release_pmd_init,

        .set_pte = xen_set_pte_init,
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd_hyper,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

        .pte_val = PV_CALLEE_SAVE(xen_pte_val),
        .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

        .make_pte = PV_CALLEE_SAVE(xen_make_pte),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
#endif  /* CONFIG_X86_PAE */
        .set_pud = xen_set_pud_hyper,

        .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
        .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
        .pud_val = PV_CALLEE_SAVE(xen_pud_val),
        .make_pud = PV_CALLEE_SAVE(xen_make_pud),
        .set_pgd = xen_set_pgd_hyper,

        .alloc_pud = xen_alloc_pmd_init,
        .release_pud = xen_release_pmd_init,
#endif  /* PAGETABLE_LEVELS == 4 */

        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
        .exit_mmap = xen_exit_mmap,

        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy_mmu,
        },

        .set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
        x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
        x86_init.paging.pagetable_init = xen_pagetable_init;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;

        memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9      /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

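/*
 * Unmap 2^order pages starting at vaddr and drop them from the p2m,
 * recording either their old MFNs (before an exchange) or their PFNs
 * (after one) for the caller.  This is the "hand the frames back to
 * the hypervisor" half of the exchange protocol below.
 */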
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
                              unsigned long *in_frames,
                              unsigned long *out_frames)
{
        int i;
        struct multicall_space mcs;

        xen_mc_batch();
        for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
                mcs = __xen_mc_entry(0);

                if (in_frames)
                        in_frames[i] = virt_to_mfn(vaddr);

                MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
                __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

                if (out_frames)
                        out_frames[i] = virt_to_pfn(vaddr);
        }
        xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
                                     unsigned long *mfns,
                                     unsigned long first_mfn)
{
        unsigned i, limit;
        unsigned long mfn;

        xen_mc_batch();

        limit = 1u << order;
        for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
                struct multicall_space mcs;
                unsigned flags;

                mcs = __xen_mc_entry(0);
                if (mfns)
                        mfn = mfns[i];
                else
                        mfn = first_mfn + i;

                if (i < (limit - 1))
                        flags = 0;
                else {
                        if (order == 0)
                                flags = UVMF_INVLPG | UVMF_ALL;
                        else
                                flags = UVMF_TLB_FLUSH | UVMF_ALL;
                }

                MULTI_update_va_mapping(mcs.mc, vaddr,
                                        mfn_pte(mfn, PAGE_KERNEL), flags);

                set_phys_to_machine(virt_to_pfn(vaddr), mfn);
        }

        xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment. Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
                               unsigned long *pfns_in,
                               unsigned long extents_out,
                               unsigned int order_out,
                               unsigned long *mfns_out,
                               unsigned int address_bits)
{
        long rc;
        int success;

        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = extents_in,
                        .extent_order = order_in,
                        .extent_start = pfns_in,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = extents_out,
                        .extent_order = order_out,
                        .extent_start = mfns_out,
                        .address_bits = address_bits,
                        .domid = DOMID_SELF
                }
        };

        BUG_ON(extents_in << order_in != extents_out << order_out);

        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == extents_in);

        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));

        return success;
}

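/*
 * The exchange, step by step: zap the PTEs so the frames have no
 * references, trade the scattered MFNs for one machine-contiguous
 * extent, then map the result back in place.  On failure the original
 * frames are remapped instead, so the region stays usable either way.
 */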
int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
                                 unsigned int address_bits)
{
        unsigned long *in_frames = discontig_frames, out_frame;
        unsigned long flags;
        int success;

        /*
         * Currently an auto-translated guest will not perform I/O, nor will
         * it require PAE page directories below 4GB. Therefore any calls to
         * this function are redundant and can be ignored.
         */

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;

        memset((void *) vstart, 0, PAGE_SIZE << order);

        spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Zap current PTEs, remembering MFNs. */
        xen_zap_pfn_range(vstart, order, in_frames, NULL);

        /* 2. Get a new contiguous memory extent. */
        out_frame = virt_to_pfn(vstart);
        success = xen_exchange_memory(1UL << order, 0, in_frames,
                                      1, order, &out_frame,
                                      address_bits);

        /* 3. Map the new extent in place of old pages. */
        if (success)
                xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
        else
                xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

        spin_unlock_irqrestore(&xen_reservation_lock, flags);

        return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
        unsigned long *out_frames = discontig_frames, in_frame;
        unsigned long flags;
        int success;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return;

        memset((void *) vstart, 0, PAGE_SIZE << order);

        spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Find start MFN of contiguous extent. */
        in_frame = virt_to_mfn(vstart);

        /* 2. Zap current PTEs. */
        xen_zap_pfn_range(vstart, order, NULL, out_frames);

        /* 3. Do the exchange for non-contiguous MFNs. */
        success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
                                      0, out_frames, 0);

        /* 4. Map new pages in place of old pages. */
        if (success)
                xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
        else
                xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

        spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
        struct xen_hvm_pagetable_dying a;
        int rc;

        a.domid = DOMID_SELF;
        a.gpa = __pa(mm->pgd);
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
        struct xen_hvm_pagetable_dying a;
        int rc = 0;

        a.domid = DOMID_SELF;
        a.gpa = 0x00;
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        if (rc < 0) {
                printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
                return 0;
        }
        return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
        if (is_pagetable_dying_supported())
                pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

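/*
 * Foreign-domain mapping (the callers aren't visible in this file;
 * privcmd-style toolstack mappings seem the likely user): PTE updates
 * are staged into mmu_update entries by apply_to_page_range() and then
 * applied REMAP_BATCH_SIZE at a time, with a single mmu_update
 * hypercall per batch.
 */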
struct remap_data {
        unsigned long mfn;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               unsigned long mfn, int nr,
                               pgprot_t prot, unsigned domid)
{
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        int batch;
        unsigned long range;
        int err = 0;

        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
                 (VM_PFNMAP | VM_RESERVED | VM_IO)));

        rmd.mfn = mfn;
        rmd.prot = prot;

        while (nr) {
                batch = min(REMAP_BATCH_SIZE, nr);
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_mfn_pte_fn, &rmd);
                if (err)
                        goto out;

                err = -EFAULT;
                if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
                        goto out;

                nr -= batch;
                addr += range;
        }

        err = 0;
out:

        flush_tlb_all();

        return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);