Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001/*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfns and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, it converts
22 * the mfn back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40 */
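/*
 * Illustrative sketch of the pfn<->mfn translation described above,
 * using helpers this file already relies on (mfn_pte, pfn_to_mfn,
 * mfn_to_pfn, pte_mfn); a rough illustration only:
 *
 *	pte_t xen_view = mfn_pte(pfn_to_mfn(pfn), PAGE_KERNEL);
 *	unsigned long back = mfn_to_pfn(pte_mfn(xen_view));	// == pfn
 *
 * The real conversions live in pte_pfn_to_mfn()/pte_mfn_to_pfn() below,
 * which additionally cope with missing p2m entries and special flags.
 */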
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -070041#include <linux/sched.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070042#include <linux/highmem.h>
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070043#include <linux/debugfs.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070044#include <linux/bug.h>
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -070045#include <linux/vmalloc.h>
Randy Dunlap44408ad2009-05-12 13:31:40 -070046#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090047#include <linux/gfp.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -070048#include <linux/memblock.h>
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -050049#include <linux/seq_file.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070050
51#include <asm/pgtable.h>
52#include <asm/tlbflush.h>
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -070053#include <asm/fixmap.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070054#include <asm/mmu_context.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080055#include <asm/setup.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070056#include <asm/paravirt.h>
Alex Nixon7347b402010-02-19 13:31:06 -050057#include <asm/e820.h>
Jeremy Fitzhardingecbcd79c2008-07-08 15:06:27 -070058#include <asm/linkage.h>
Alex Nixon08bbc9d2009-02-09 12:05:46 -080059#include <asm/page.h>
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -070060#include <asm/init.h>
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -070061#include <asm/pat.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070062
63#include <asm/xen/hypercall.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070064#include <asm/xen/hypervisor.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070065
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080066#include <xen/xen.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070067#include <xen/page.h>
68#include <xen/interface/xen.h>
Stefano Stabellini59151002010-06-17 14:22:52 +010069#include <xen/interface/hvm/hvm_op.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080070#include <xen/interface/version.h>
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080071#include <xen/interface/memory.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080072#include <xen/hvc-console.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070073
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070074#include "multicalls.h"
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070075#include "mmu.h"
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070076#include "debugfs.h"
77
78#define MMU_UPDATE_HISTO 30
79
Alex Nixon19001c82009-02-09 12:05:46 -080080/*
81 * Protects atomic reservation decrease/increase against concurrent increases.
82 * Also protects non-atomic updates of current_pages and driver_pages, and
83 * balloon lists.
84 */
85DEFINE_SPINLOCK(xen_reservation_lock);
86
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070087#ifdef CONFIG_XEN_DEBUG_FS
88
89static struct {
90 u32 pgd_update;
91 u32 pgd_update_pinned;
92 u32 pgd_update_batched;
93
94 u32 pud_update;
95 u32 pud_update_pinned;
96 u32 pud_update_batched;
97
98 u32 pmd_update;
99 u32 pmd_update_pinned;
100 u32 pmd_update_batched;
101
102 u32 pte_update;
103 u32 pte_update_pinned;
104 u32 pte_update_batched;
105
106 u32 mmu_update;
107 u32 mmu_update_extended;
108 u32 mmu_update_histo[MMU_UPDATE_HISTO];
109
110 u32 prot_commit;
111 u32 prot_commit_batched;
112
113 u32 set_pte_at;
114 u32 set_pte_at_batched;
115 u32 set_pte_at_pinned;
116 u32 set_pte_at_current;
117 u32 set_pte_at_kernel;
118} mmu_stats;
119
120static u8 zero_stats;
121
122static inline void check_zero(void)
123{
124 if (unlikely(zero_stats)) {
125 memset(&mmu_stats, 0, sizeof(mmu_stats));
126 zero_stats = 0;
127 }
128}
129
130#define ADD_STATS(elem, val) \
131 do { check_zero(); mmu_stats.elem += (val); } while(0)
132
133#else /* !CONFIG_XEN_DEBUG_FS */
134
135#define ADD_STATS(elem, val) do { (void)(val); } while(0)
136
137#endif /* CONFIG_XEN_DEBUG_FS */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700138
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -0800139
140/*
141 * Identity map, in addition to plain kernel map. This needs to be
142 * large enough to allocate the page table pages needed to map the rest.
143 * Each page can map 2MB.
144 */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -0700145#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
146static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -0800147
148#ifdef CONFIG_X86_64
149/* l3 pud for userspace vsyscall mapping */
150static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
151#endif /* CONFIG_X86_64 */
152
153/*
154 * Note about cr3 (pagetable base) values:
155 *
156 * xen_cr3 contains the current logical cr3 value; it contains the
157 * last set cr3. This may not be the current effective cr3, because
158 * its update may be being lazily deferred. However, a vcpu looking
159 * at its own cr3 can use this value knowing that everything will
160 * be self-consistent.
161 *
162 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
163 * hypercall to set the vcpu cr3 is complete (so it may be a little
164 * out of date, but it will never be set early). If one vcpu is
165 * looking at another vcpu's cr3 value, it should use this variable.
166 */
167DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
168DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
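/*
 * Illustrative usage of the two per-cpu values above: code reading cr3
 * state is expected to pick the variable that matches its use, e.g.
 *
 *	unsigned long mine   = percpu_read(xen_cr3);		// this vcpu
 *	unsigned long theirs = per_cpu(xen_current_cr3, cpu);	// another vcpu
 *
 * using the same percpu accessors as the rest of this file.
 */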
169
170
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700171/*
172 * Just beyond the highest usermode address. STACK_TOP_MAX has a
173 * redzone above it, so round it up to a PGD boundary.
174 */
175#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
176
Jeremy Fitzhardinge9976b392009-02-27 09:19:26 -0800177unsigned long arbitrary_virt_to_mfn(void *vaddr)
178{
179 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
180
181 return PFN_DOWN(maddr.maddr);
182}
183
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700184xmaddr_t arbitrary_virt_to_machine(void *vaddr)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700185{
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700186 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100187 unsigned int level;
Chris Lalancette9f32d212008-10-23 17:40:25 -0700188 pte_t *pte;
189 unsigned offset;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700190
Chris Lalancette9f32d212008-10-23 17:40:25 -0700191 /*
192 * if the PFN is in the linear mapped vaddr range, we can just use
193 * the (quick) virt_to_machine() p2m lookup
194 */
195 if (virt_addr_valid(vaddr))
196 return virt_to_machine(vaddr);
197
198 /* otherwise we have to do a (slower) full page-table walk */
199
200 pte = lookup_address(address, &level);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700201 BUG_ON(pte == NULL);
Chris Lalancette9f32d212008-10-23 17:40:25 -0700202 offset = address & ~PAGE_MASK;
Jeremy Fitzhardingeebd879e2008-07-08 15:06:54 -0700203 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700204}
Stephen Rothwellde23be52011-01-15 10:36:26 +1100205EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
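/*
 * Usage sketch (illustrative): callers needing the machine address of a
 * pointer that may be vmalloc'd, ioremapped or kmapped go through the
 * full walk above, e.g.
 *
 *	u.ptr = arbitrary_virt_to_machine(ptep).maddr;
 *
 * as the mmu_update paths below do, while linear-mapped addresses can
 * take the cheaper virt_to_machine() shortcut directly.
 */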
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700206
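/*
 * The two helpers below change the protection of a lowmem page by
 * rewriting the pte of its kernel mapping. They go through
 * HYPERVISOR_update_va_mapping() so that Xen validates the change,
 * since a plain pte store may not be possible once the pte page itself
 * is pinned read-only (see the pinning rules in the header comment).
 */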
207void make_lowmem_page_readonly(void *vaddr)
208{
209 pte_t *pte, ptev;
210 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100211 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700212
Ingo Molnarf0646e42008-01-30 13:33:43 +0100213 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700214 if (pte == NULL)
215 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700216
217 ptev = pte_wrprotect(*pte);
218
219 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
220 BUG();
221}
222
223void make_lowmem_page_readwrite(void *vaddr)
224{
225 pte_t *pte, ptev;
226 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100227 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700228
Ingo Molnarf0646e42008-01-30 13:33:43 +0100229 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700230 if (pte == NULL)
231 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700232
233 ptev = pte_mkwrite(*pte);
234
235 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
236 BUG();
237}
238
239
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700240static bool xen_page_pinned(void *ptr)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100241{
242 struct page *page = virt_to_page(ptr);
243
244 return PagePinned(page);
245}
246
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800247static bool xen_iomap_pte(pte_t pte)
248{
Alex Nixon7347b402010-02-19 13:31:06 -0500249 return pte_flags(pte) & _PAGE_IOMAP;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800250}
251
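/*
 * Install a pte on behalf of a particular domain via a batched
 * mmu_update hypercall; xen_set_iomap_pte() below uses this with
 * DOMID_IO to create machine-address (_PAGE_IOMAP) mappings.
 */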
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800252void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800253{
254 struct multicall_space mcs;
255 struct mmu_update *u;
256
257 mcs = xen_mc_entry(sizeof(*u));
258 u = mcs.args;
259
260 /* ptep might be kmapped when using 32-bit HIGHPTE */
261 u->ptr = arbitrary_virt_to_machine(ptep).maddr;
262 u->val = pte_val_ma(pteval);
263
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800264 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800265
266 xen_mc_issue(PARAVIRT_LAZY_MMU);
267}
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800268EXPORT_SYMBOL_GPL(xen_set_domain_pte);
269
270static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
271{
272 xen_set_domain_pte(ptep, pteval, DOMID_IO);
273}
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800274
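/*
 * Queue an mmu_update request on the current multicall batch: if the
 * previous multicall entry is already an mmu_update, extend its
 * argument list in place; otherwise start a new __HYPERVISOR_mmu_update
 * entry for DOMID_SELF.
 */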
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700275static void xen_extend_mmu_update(const struct mmu_update *update)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700276{
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700277 struct multicall_space mcs;
278 struct mmu_update *u;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700279
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700280 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
281
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700282 if (mcs.mc != NULL) {
283 ADD_STATS(mmu_update_extended, 1);
284 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
285
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700286 mcs.mc->args[1]++;
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700287
288 if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
289 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
290 else
291 ADD_STATS(mmu_update_histo[0], 1);
292 } else {
293 ADD_STATS(mmu_update, 1);
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700294 mcs = __xen_mc_entry(sizeof(*u));
295 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700296 ADD_STATS(mmu_update_histo[1], 1);
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700297 }
298
299 u = mcs.args;
300 *u = *update;
301}
302
303void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
304{
305 struct mmu_update u;
306
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700307 preempt_disable();
308
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700309 xen_mc_batch();
310
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700311 /* ptr may be ioremapped for 64-bit pagetable setup */
312 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700313 u.val = pmd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700314 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700315
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700316 ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
317
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700318 xen_mc_issue(PARAVIRT_LAZY_MMU);
319
320 preempt_enable();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700321}
322
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100323void xen_set_pmd(pmd_t *ptr, pmd_t val)
324{
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700325 ADD_STATS(pmd_update, 1);
326
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100327 /* If page is not pinned, we can just update the entry
328 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700329 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100330 *ptr = val;
331 return;
332 }
333
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700334 ADD_STATS(pmd_update_pinned, 1);
335
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100336 xen_set_pmd_hyper(ptr, val);
337}
338
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700339/*
340 * Associate a virtual page frame with a given physical page frame
341 * and protection flags for that frame.
342 */
343void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
344{
Jeremy Fitzhardinge836fe2f2008-07-08 15:06:58 -0700345 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700346}
347
348void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
349 pte_t *ptep, pte_t pteval)
350{
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800351 if (xen_iomap_pte(pteval)) {
352 xen_set_iomap_pte(ptep, pteval);
353 goto out;
354 }
355
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700356 ADD_STATS(set_pte_at, 1);
357// ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
358 ADD_STATS(set_pte_at_current, mm == current->mm);
359 ADD_STATS(set_pte_at_kernel, mm == &init_mm);
360
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700361 if (mm == current->mm || mm == &init_mm) {
Jeremy Fitzhardinge8965c1c2007-10-16 11:51:29 -0700362 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700363 struct multicall_space mcs;
364 mcs = xen_mc_entry(0);
365
366 MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700367 ADD_STATS(set_pte_at_batched, 1);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700368 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge2bd50032008-04-02 10:54:10 -0700369 goto out;
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700370 } else
371 if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
Jeremy Fitzhardinge2bd50032008-04-02 10:54:10 -0700372 goto out;
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700373 }
374 xen_set_pte(ptep, pteval);
Jeremy Fitzhardinge2bd50032008-04-02 10:54:10 -0700375
Jeremy Fitzhardinge2829b442009-02-17 23:53:19 -0800376out: return;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700377}
378
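/*
 * ptep_modify_prot_start/commit pair: the start hook just snapshots the
 * pte, and the commit hook pushes the new value with
 * MMU_PT_UPDATE_PRESERVE_AD so that accessed/dirty bits set in the
 * meantime are not lost.
 */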
Tejf63c2f22008-12-16 11:56:06 -0800379pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
380 unsigned long addr, pte_t *ptep)
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700381{
382 /* Just return the pte as-is. We preserve the bits on commit */
383 return *ptep;
384}
385
386void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
387 pte_t *ptep, pte_t pte)
388{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700389 struct mmu_update u;
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700390
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700391 xen_mc_batch();
392
Chris Lalancette9f32d212008-10-23 17:40:25 -0700393 u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700394 u.val = pte_val_ma(pte);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700395 xen_extend_mmu_update(&u);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700396
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700397 ADD_STATS(prot_commit, 1);
398 ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
399
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700400 xen_mc_issue(PARAVIRT_LAZY_MMU);
401}
402
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700403/* Assume pteval_t is equivalent to all the other *val_t types. */
404static pteval_t pte_mfn_to_pfn(pteval_t val)
405{
406 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700407 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700408 pteval_t flags = val & PTE_FLAGS_MASK;
Jeremy Fitzhardinged8355ac2008-07-03 22:10:18 -0700409 val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700410 }
411
412 return val;
413}
414
415static pteval_t pte_pfn_to_mfn(pteval_t val)
416{
417 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700418 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700419 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500420 unsigned long mfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700421
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500422 if (!xen_feature(XENFEAT_auto_translated_physmap))
423 mfn = get_phys_to_machine(pfn);
424 else
425 mfn = pfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700426 /*
427 * If there's no mfn for the pfn, then just create an
428 * empty non-present pte. Unfortunately this loses
429 * information about the original pfn, so
430 * pte_mfn_to_pfn is asymmetric.
431 */
432 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
433 mfn = 0;
434 flags = 0;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500435 } else {
436 /*
437 * Paramount to do this test _after_ the
438 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
439 * IDENTITY_FRAME_BIT resolves to true.
440 */
441 mfn &= ~FOREIGN_FRAME_BIT;
442 if (mfn & IDENTITY_FRAME_BIT) {
443 mfn &= ~IDENTITY_FRAME_BIT;
444 flags |= _PAGE_IOMAP;
445 }
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700446 }
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700447 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700448 }
449
450 return val;
451}
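/*
 * Illustrative consequence of the asymmetry noted above: for a pfn with
 * no p2m entry, pte_pfn_to_mfn() yields an all-zero (non-present) pte,
 * and pte_mfn_to_pfn() of that value is simply 0 again, so the original
 * pfn cannot be recovered.
 */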
452
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800453static pteval_t iomap_pte(pteval_t val)
454{
455 if (val & _PAGE_PRESENT) {
456 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
457 pteval_t flags = val & PTE_FLAGS_MASK;
458
459 /* We assume the pte frame number is an MFN, so
460 just use it as-is. */
461 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
462 }
463
464 return val;
465}
466
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700467pteval_t xen_pte_val(pte_t pte)
468{
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700469 pteval_t pteval = pte.pte;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800470
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700471 /* If this is a WC pte, convert back from Xen WC to Linux WC */
472 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
473 WARN_ON(!pat_enabled);
474 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
475 }
476
477 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
478 return pteval;
479
480 return pte_mfn_to_pfn(pteval);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700481}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800482PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700483
484pgdval_t xen_pgd_val(pgd_t pgd)
485{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700486 return pte_mfn_to_pfn(pgd.pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700487}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800488PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700489
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700490/*
491 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
492 * are reserved for now, to correspond to the Intel-reserved PAT
493 * types.
494 *
495 * We expect Linux's PAT set as follows:
496 *
497 * Idx PTE flags Linux Xen Default
498 * 0 WB WB WB
499 * 1 PWT WC WT WT
500 * 2 PCD UC- UC- UC-
501 * 3 PCD PWT UC UC UC
502 * 4 PAT WB WC WB
503 * 5 PAT PWT WC WP WT
504 * 6 PAT PCD UC- UC UC-
505 * 7 PAT PCD PWT UC UC UC
506 */
507
508void xen_set_pat(u64 pat)
509{
510 /* We expect Linux to use a PAT setting of
511 * UC UC- WC WB (ignoring the PAT flag) */
512 WARN_ON(pat != 0x0007010600070106ull);
513}
514
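/*
 * Worked example of the table above: Linux encodes WC at index 1
 * (_PAGE_PWT), where Xen has WT; Xen's WC sits at index 4 (_PAGE_PAT).
 * xen_make_pte() below therefore rewrites PWT -> PAT on the way into a
 * pte, and xen_pte_val() above rewrites PAT -> PWT on the way back out.
 */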
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700515pte_t xen_make_pte(pteval_t pte)
516{
Alex Nixon7347b402010-02-19 13:31:06 -0500517 phys_addr_t addr = (pte & PTE_PFN_MASK);
518
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700519 /* If Linux is trying to set a WC pte, then map to the Xen WC.
520 * If _PAGE_PAT is set, then it probably means it is really
521 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
522 * things work out OK...
523 *
524 * (We should never see kernel mappings with _PAGE_PSE set,
525 * but we could see hugetlbfs mappings, I think.).
526 */
527 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
528 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
529 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
530 }
531
Alex Nixon7347b402010-02-19 13:31:06 -0500532 /*
533 * Unprivileged domains are allowed to do IOMAPpings for
534 * PCI passthrough, but not map ISA space. The ISA
535 * mappings are just dummy local mappings to keep other
536 * parts of the kernel happy.
537 */
538 if (unlikely(pte & _PAGE_IOMAP) &&
539 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800540 pte = iomap_pte(pte);
Alex Nixon7347b402010-02-19 13:31:06 -0500541 } else {
542 pte &= ~_PAGE_IOMAP;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800543 pte = pte_pfn_to_mfn(pte);
Alex Nixon7347b402010-02-19 13:31:06 -0500544 }
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800545
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700546 return native_make_pte(pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700547}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800548PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700549
Konrad Rzeszutek Wilkfc251512010-12-23 16:25:29 -0500550#ifdef CONFIG_XEN_DEBUG
551pte_t xen_make_pte_debug(pteval_t pte)
552{
553 phys_addr_t addr = (pte & PTE_PFN_MASK);
554 phys_addr_t other_addr;
555 bool io_page = false;
556 pte_t _pte;
557
558 if (pte & _PAGE_IOMAP)
559 io_page = true;
560
561 _pte = xen_make_pte(pte);
562
563 if (!addr)
564 return _pte;
565
566 if (io_page &&
567 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
568 other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
569 WARN(addr != other_addr,
570 "0x%lx is using VM_IO, but it is 0x%lx!\n",
571 (unsigned long)addr, (unsigned long)other_addr);
572 } else {
573 pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
574 other_addr = (_pte.pte & PTE_PFN_MASK);
575 WARN((addr == other_addr) && (!io_page) && (!iomap_set),
576 "0x%lx is missing VM_IO (and wasn't fixed)!\n",
577 (unsigned long)addr);
578 }
579
580 return _pte;
581}
582PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
583#endif
584
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700585pgd_t xen_make_pgd(pgdval_t pgd)
586{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700587 pgd = pte_pfn_to_mfn(pgd);
588 return native_make_pgd(pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700589}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800590PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700591
592pmdval_t xen_pmd_val(pmd_t pmd)
593{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700594 return pte_mfn_to_pfn(pmd.pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700595}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800596PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100597
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100598void xen_set_pud_hyper(pud_t *ptr, pud_t val)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700599{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700600 struct mmu_update u;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700601
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700602 preempt_disable();
603
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700604 xen_mc_batch();
605
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700606 /* ptr may be ioremapped for 64-bit pagetable setup */
607 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700608 u.val = pud_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700609 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700610
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700611 ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
612
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700613 xen_mc_issue(PARAVIRT_LAZY_MMU);
614
615 preempt_enable();
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700616}
617
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100618void xen_set_pud(pud_t *ptr, pud_t val)
619{
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700620 ADD_STATS(pud_update, 1);
621
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100622 /* If page is not pinned, we can just update the entry
623 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700624 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100625 *ptr = val;
626 return;
627 }
628
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700629 ADD_STATS(pud_update_pinned, 1);
630
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100631 xen_set_pud_hyper(ptr, val);
632}
633
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700634void xen_set_pte(pte_t *ptep, pte_t pte)
635{
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800636 if (xen_iomap_pte(pte)) {
637 xen_set_iomap_pte(ptep, pte);
638 return;
639 }
640
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700641 ADD_STATS(pte_update, 1);
642// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
643 ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
644
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700645#ifdef CONFIG_X86_PAE
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700646 ptep->pte_high = pte.pte_high;
647 smp_wmb();
648 ptep->pte_low = pte.pte_low;
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700649#else
650 *ptep = pte;
651#endif
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700652}
653
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700654#ifdef CONFIG_X86_PAE
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700655void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
656{
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800657 if (xen_iomap_pte(pte)) {
658 xen_set_iomap_pte(ptep, pte);
659 return;
660 }
661
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700662 set_64bit((u64 *)ptep, native_pte_val(pte));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700663}
664
665void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
666{
667 ptep->pte_low = 0;
668 smp_wmb(); /* make sure low gets written first */
669 ptep->pte_high = 0;
670}
671
672void xen_pmd_clear(pmd_t *pmdp)
673{
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100674 set_pmd(pmdp, __pmd(0));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700675}
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700676#endif /* CONFIG_X86_PAE */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700677
Jeremy Fitzhardingeabf33032008-03-17 16:37:07 -0700678pmd_t xen_make_pmd(pmdval_t pmd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700679{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700680 pmd = pte_pfn_to_mfn(pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700681 return native_make_pmd(pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700682}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800683PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700684
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700685#if PAGETABLE_LEVELS == 4
686pudval_t xen_pud_val(pud_t pud)
687{
688 return pte_mfn_to_pfn(pud.pud);
689}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800690PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700691
692pud_t xen_make_pud(pudval_t pud)
693{
694 pud = pte_pfn_to_mfn(pud);
695
696 return native_make_pud(pud);
697}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800698PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700699
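/*
 * On 64-bit, a kernel pgd may have a shadow "user" pgd for the usermode
 * half, stashed in page->private of the pgd's page. Return the matching
 * slot in that user pgd, or NULL if there is no user pgd or the entry
 * is outside the user range.
 */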
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700700pgd_t *xen_get_user_pgd(pgd_t *pgd)
701{
702 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
703 unsigned offset = pgd - pgd_page;
704 pgd_t *user_ptr = NULL;
705
706 if (offset < pgd_index(USER_LIMIT)) {
707 struct page *page = virt_to_page(pgd_page);
708 user_ptr = (pgd_t *)page->private;
709 if (user_ptr)
710 user_ptr += offset;
711 }
712
713 return user_ptr;
714}
715
716static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700717{
718 struct mmu_update u;
719
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700720 u.ptr = virt_to_machine(ptr).maddr;
721 u.val = pgd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700722 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700723}
724
725/*
726 * Raw hypercall-based set_pgd, intended for in early boot before
727 * there's a page structure. This implies:
728 * 1. The only existing pagetable is the kernel's
729 * 2. It is always pinned
730 * 3. It has no user pagetable attached to it
731 */
732void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
733{
734 preempt_disable();
735
736 xen_mc_batch();
737
738 __xen_set_pgd_hyper(ptr, val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700739
740 xen_mc_issue(PARAVIRT_LAZY_MMU);
741
742 preempt_enable();
743}
744
745void xen_set_pgd(pgd_t *ptr, pgd_t val)
746{
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700747 pgd_t *user_ptr = xen_get_user_pgd(ptr);
748
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700749 ADD_STATS(pgd_update, 1);
750
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700751 /* If page is not pinned, we can just update the entry
752 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700753 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700754 *ptr = val;
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700755 if (user_ptr) {
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700756 WARN_ON(xen_page_pinned(user_ptr));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700757 *user_ptr = val;
758 }
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700759 return;
760 }
761
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700762 ADD_STATS(pgd_update_pinned, 1);
763 ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
764
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700765 /* If it's pinned, then we can at least batch the kernel and
766 user updates together. */
767 xen_mc_batch();
768
769 __xen_set_pgd_hyper(ptr, val);
770 if (user_ptr)
771 __xen_set_pgd_hyper(user_ptr, val);
772
773 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700774}
775#endif /* PAGETABLE_LEVELS == 4 */
776
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700777/*
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700778 * (Yet another) pagetable walker. This one is intended for pinning a
779 * pagetable. This means that it walks a pagetable and calls the
780 * callback function on each page it finds making up the page table,
781 * at every level. It walks the entire pagetable, but it only bothers
782 * pinning pte pages which are below limit. In the normal case this
783 * will be STACK_TOP_MAX, but at boot we need to pin up to
784 * FIXADDR_TOP.
785 *
786 * For 32-bit the important bit is that we don't pin beyond there,
787 * because then we start getting into Xen's ptes.
788 *
789 * For 64-bit, we must skip the Xen hole in the middle of the address
790 * space, just after the big x86-64 virtual hole.
791 */
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000792static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
793 int (*func)(struct mm_struct *mm, struct page *,
794 enum pt_level),
795 unsigned long limit)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700796{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700797 int flush = 0;
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700798 unsigned hole_low, hole_high;
799 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
800 unsigned pgdidx, pudidx, pmdidx;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700801
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700802 /* The limit is the last byte to be touched */
803 limit--;
804 BUG_ON(limit >= FIXADDR_TOP);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700805
806 if (xen_feature(XENFEAT_auto_translated_physmap))
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700807 return 0;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700808
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700809 /*
810 * 64-bit has a great big hole in the middle of the address
811 * space, which contains the Xen mappings. On 32-bit these
812 * will end up making a zero-sized hole and so is a no-op.
813 */
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700814 hole_low = pgd_index(USER_LIMIT);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700815 hole_high = pgd_index(PAGE_OFFSET);
816
817 pgdidx_limit = pgd_index(limit);
818#if PTRS_PER_PUD > 1
819 pudidx_limit = pud_index(limit);
820#else
821 pudidx_limit = 0;
822#endif
823#if PTRS_PER_PMD > 1
824 pmdidx_limit = pmd_index(limit);
825#else
826 pmdidx_limit = 0;
827#endif
828
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700829 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700830 pud_t *pud;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700831
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700832 if (pgdidx >= hole_low && pgdidx < hole_high)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700833 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700834
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700835 if (!pgd_val(pgd[pgdidx]))
836 continue;
837
838 pud = pud_offset(&pgd[pgdidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700839
840 if (PTRS_PER_PUD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700841 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700842
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700843 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700844 pmd_t *pmd;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700845
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700846 if (pgdidx == pgdidx_limit &&
847 pudidx > pudidx_limit)
848 goto out;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700849
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700850 if (pud_none(pud[pudidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700851 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700852
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700853 pmd = pmd_offset(&pud[pudidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700854
855 if (PTRS_PER_PMD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700856 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700857
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700858 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
859 struct page *pte;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700860
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700861 if (pgdidx == pgdidx_limit &&
862 pudidx == pudidx_limit &&
863 pmdidx > pmdidx_limit)
864 goto out;
865
866 if (pmd_none(pmd[pmdidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700867 continue;
868
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700869 pte = pmd_page(pmd[pmdidx]);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700870 flush |= (*func)(mm, pte, PT_PTE);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700871 }
872 }
873 }
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700874
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700875out:
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700876 /* Do the top level last, so that the callbacks can use it as
877 a cue to do final things like tlb flushes. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700878 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700879
880 return flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700881}
882
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000883static int xen_pgd_walk(struct mm_struct *mm,
884 int (*func)(struct mm_struct *mm, struct page *,
885 enum pt_level),
886 unsigned long limit)
887{
888 return __xen_pgd_walk(mm, mm->pgd, func, limit);
889}
890
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700891/* If we're using split pte locks, then take the page's lock and
892 return a pointer to it. Otherwise return NULL. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700893static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700894{
895 spinlock_t *ptl = NULL;
896
Jeremy Fitzhardingef7d0b922008-09-09 15:43:22 -0700897#if USE_SPLIT_PTLOCKS
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700898 ptl = __pte_lockptr(page);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700899 spin_lock_nest_lock(ptl, &mm->page_table_lock);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700900#endif
901
902 return ptl;
903}
904
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700905static void xen_pte_unlock(void *v)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700906{
907 spinlock_t *ptl = v;
908 spin_unlock(ptl);
909}
910
911static void xen_do_pin(unsigned level, unsigned long pfn)
912{
913 struct mmuext_op *op;
914 struct multicall_space mcs;
915
916 mcs = __xen_mc_entry(sizeof(*op));
917 op = mcs.args;
918 op->cmd = level;
919 op->arg1.mfn = pfn_to_mfn(pfn);
920 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
921}
922
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700923static int xen_pin_page(struct mm_struct *mm, struct page *page,
924 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700925{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700926 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700927 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700928
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700929 if (pgfl)
930 flush = 0; /* already pinned */
931 else if (PageHighMem(page))
932 /* kmaps need flushing if we found an unpinned
933 highpage */
934 flush = 1;
935 else {
936 void *pt = lowmem_page_address(page);
937 unsigned long pfn = page_to_pfn(page);
938 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700939 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700940
941 flush = 0;
942
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700943 /*
944 * We need to hold the pagetable lock between the time
945 * we make the pagetable RO and when we actually pin
946 * it. If we don't, then other users may come in and
947 * attempt to update the pagetable by writing it,
948 * which will fail because the memory is RO but not
949 * pinned, so Xen won't do the trap'n'emulate.
950 *
951 * If we're using split pte locks, we can't hold the
952 * entire pagetable's worth of locks during the
953 * traverse, because we may wrap the preempt count (8
954 * bits). The solution is to mark RO and pin each PTE
955 * page while holding the lock. This means the number
956 * of locks we end up holding is never more than a
957 * batch size (~32 entries, at present).
958 *
959 * If we're not using split pte locks, we needn't pin
960 * the PTE pages independently, because we're
961 * protected by the overall pagetable lock.
962 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700963 ptl = NULL;
964 if (level == PT_PTE)
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700965 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700966
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700967 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
968 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700969 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
970
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700971 if (ptl) {
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700972 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
973
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700974 /* Queue a deferred unlock for when this batch
975 is completed. */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700976 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700977 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700978 }
979
980 return flush;
981}
982
983/* This is called just after a mm has been created, but it has not
984 been used yet. We need to make sure that its pagetable is all
985 read-only, and can be pinned. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700986static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700987{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700988 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700989
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000990 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100991 /* re-enable interrupts for flushing */
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700992 xen_mc_issue(0);
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100993
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700994 kmap_flush_unused();
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100995
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700996 xen_mc_batch();
997 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700998
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700999#ifdef CONFIG_X86_64
1000 {
1001 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1002
1003 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
1004
1005 if (user_pgd) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001006 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
Tejf63c2f22008-12-16 11:56:06 -08001007 xen_do_pin(MMUEXT_PIN_L4_TABLE,
1008 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001009 }
1010 }
1011#else /* CONFIG_X86_32 */
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001012#ifdef CONFIG_X86_PAE
1013 /* Need to make sure unshared kernel PMD is pinnable */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -08001014 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001015 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001016#endif
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +01001017 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001018#endif /* CONFIG_X86_64 */
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001019 xen_mc_issue(0);
1020}
1021
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001022static void xen_pgd_pin(struct mm_struct *mm)
1023{
1024 __xen_pgd_pin(mm, mm->pgd);
1025}
1026
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001027/*
1028 * On save, we need to pin all pagetables to make sure they get their
1029 * mfns turned into pfns. Search the list for any unpinned pgds and pin
1030 * them (unpinned pgds are not currently in use, probably because the
1031 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001032 *
1033 * Expected to be called in stop_machine() ("equivalent to taking
1034 * every spinlock in the system"), so the locking doesn't really
1035 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001036 */
1037void xen_mm_pin_all(void)
1038{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001039 struct page *page;
1040
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001041 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001042
1043 list_for_each_entry(page, &pgd_list, lru) {
1044 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001045 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001046 SetPageSavePinned(page);
1047 }
1048 }
1049
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001050 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001051}
1052
Eduardo Habkostc1f2f092008-07-08 15:06:24 -07001053/*
1054 * The init_mm pagetable is really pinned as soon as it's created, but
1055 * that's before we have page structures to store the bits. So do all
1056 * the book-keeping now.
1057 */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001058static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
1059 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001060{
1061 SetPagePinned(page);
1062 return 0;
1063}
1064
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001065static void __init xen_mark_init_mm_pinned(void)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001066{
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001067 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001068}
1069
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001070static int xen_unpin_page(struct mm_struct *mm, struct page *page,
1071 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001072{
Christoph Lameterd60cd462008-04-28 02:12:51 -07001073 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001074
1075 if (pgfl && !PageHighMem(page)) {
1076 void *pt = lowmem_page_address(page);
1077 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001078 spinlock_t *ptl = NULL;
1079 struct multicall_space mcs;
1080
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -07001081 /*
1082 * Do the converse to pin_page. If we're using split
1083 * pte locks, we must be holding the lock for while
1084 * the pte page is unpinned but still RO to prevent
1085 * concurrent updates from seeing it in this
1086 * partially-pinned state.
1087 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001088 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001089 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001090
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -07001091 if (ptl)
1092 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001093 }
1094
1095 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001096
1097 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1098 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001099 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1100
1101 if (ptl) {
1102 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001103 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001104 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001105 }
1106
1107 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001108}
1109
1110/* Release a pagetable's pages back as normal RW */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001111static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001112{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001113 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001114
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001115 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001116
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001117#ifdef CONFIG_X86_64
1118 {
1119 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1120
1121 if (user_pgd) {
Tejf63c2f22008-12-16 11:56:06 -08001122 xen_do_pin(MMUEXT_UNPIN_TABLE,
1123 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001124 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001125 }
1126 }
1127#endif
1128
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001129#ifdef CONFIG_X86_PAE
1130 /* Need to make sure unshared kernel PMD is unpinned */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -08001131 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001132 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -07001133#endif
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -07001134
Ian Campbell86bbc2c2008-11-21 10:21:33 +00001135 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001136
1137 xen_mc_issue(0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001138}
1139
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001140static void xen_pgd_unpin(struct mm_struct *mm)
1141{
1142 __xen_pgd_unpin(mm, mm->pgd);
1143}
1144
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001145/*
1146 * On resume, undo any pinning done at save, so that the rest of the
1147 * kernel doesn't see any unexpected pinned pagetables.
1148 */
1149void xen_mm_unpin_all(void)
1150{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001151 struct page *page;
1152
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001153 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001154
1155 list_for_each_entry(page, &pgd_list, lru) {
1156 if (PageSavePinned(page)) {
1157 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001158 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001159 ClearPageSavePinned(page);
1160 }
1161 }
1162
Andrea Arcangelia79e53d2011-02-16 15:45:22 -08001163 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001164}
1165
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001166void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1167{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001168 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001169 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001170 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001171}
1172
1173void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1174{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001175 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001176 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001177 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001178}
1179
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001180
1181#ifdef CONFIG_SMP
1182/* Another cpu may still have their %cr3 pointing at the pagetable, so
1183 we need to repoint it somewhere else before we can unpin it. */
1184static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001185{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001186 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001187 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001188
Brian Gerst9eb912d2009-01-19 00:38:57 +09001189 active_mm = percpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001190
1191 if (active_mm == mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001192 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001193
1194 /* If this cpu still has a stale cr3 reference, then make sure
1195 it has been flushed. */
Jeremy Fitzhardinge7fd7d832009-02-17 23:24:03 -08001196 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001197 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001198}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001199
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001200static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001201{
Mike Travise4d98202008-12-16 17:34:05 -08001202 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001203 unsigned cpu;
1204
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001205 if (current->active_mm == mm) {
1206 if (current->mm == mm)
1207 load_cr3(swapper_pg_dir);
1208 else
1209 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001210 }
1211
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001212 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001213 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1214 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001215 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001216 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1217 continue;
1218 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1219 }
1220 return;
1221 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001222 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001223
1224 /* It's possible that a vcpu may have a stale reference to our
1225 cr3, because its in lazy mode, and it hasn't yet flushed
1226 its set of pending hypercalls yet. In this case, we can
1227 look at its actual current cr3 value, and force it to flush
1228 if needed. */
1229 for_each_online_cpu(cpu) {
1230 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001231 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001232 }
1233
Mike Travise4d98202008-12-16 17:34:05 -08001234 if (!cpumask_empty(mask))
1235 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1236 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001237}
1238#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001239static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001240{
1241 if (current->active_mm == mm)
1242 load_cr3(swapper_pg_dir);
1243}
1244#endif
1245
1246/*
1247 * While a process runs, Xen pins its pagetables, which means that the
1248 * hypervisor forces them to be read-only and controls all updates
1249 * to them. This means that all pagetable updates have to go via the
1250 * hypervisor, which is moderately expensive.
1251 *
1252 * Since we're pulling the pagetable down, we switch to use init_mm,
1253 * unpin old process pagetable and mark it all read-write, which
1254 * allows further operations on it to be simple memory accesses.
1255 *
1256 * The only subtle point is that another CPU may be still using the
1257 * pagetable because of lazy tlb flushing. This means we need need to
1258 * switch all CPUs off this pagetable before we can unpin it.
1259 */
1260void xen_exit_mmap(struct mm_struct *mm)
1261{
1262 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001263 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001264 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001265
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001266 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001267
1268 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001269 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001270 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001271
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001272 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001273}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001274
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001275static __init void xen_pagetable_setup_start(pgd_t *base)
1276{
1277}
1278
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001279static void xen_post_allocator_init(void);
1280
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001281static __init void xen_pagetable_setup_done(pgd_t *base)
1282{
1283 xen_setup_shared_info();
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001284 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001285}
1286
1287static void xen_write_cr2(unsigned long cr2)
1288{
1289 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1290}
1291
1292static unsigned long xen_read_cr2(void)
1293{
1294 return percpu_read(xen_vcpu)->arch.cr2;
1295}
1296
1297unsigned long xen_read_cr2_direct(void)
1298{
1299 return percpu_read(xen_vcpu_info.arch.cr2);
1300}
1301
1302static void xen_flush_tlb(void)
1303{
1304 struct mmuext_op *op;
1305 struct multicall_space mcs;
1306
1307 preempt_disable();
1308
1309 mcs = xen_mc_entry(sizeof(*op));
1310
1311 op = mcs.args;
1312 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1313 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1314
1315 xen_mc_issue(PARAVIRT_LAZY_MMU);
1316
1317 preempt_enable();
1318}
1319
1320static void xen_flush_tlb_single(unsigned long addr)
1321{
1322 struct mmuext_op *op;
1323 struct multicall_space mcs;
1324
1325 preempt_disable();
1326
1327 mcs = xen_mc_entry(sizeof(*op));
1328 op = mcs.args;
1329 op->cmd = MMUEXT_INVLPG_LOCAL;
1330 op->arg1.linear_addr = addr & PAGE_MASK;
1331 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1332
1333 xen_mc_issue(PARAVIRT_LAZY_MMU);
1334
1335 preempt_enable();
1336}
1337
1338static void xen_flush_tlb_others(const struct cpumask *cpus,
1339 struct mm_struct *mm, unsigned long va)
1340{
1341 struct {
1342 struct mmuext_op op;
1343 DECLARE_BITMAP(mask, NR_CPUS);
1344 } *args;
1345 struct multicall_space mcs;
1346
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001347 if (cpumask_empty(cpus))
1348 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001349
1350 mcs = xen_mc_entry(sizeof(*args));
1351 args = mcs.args;
1352 args->op.arg2.vcpumask = to_cpumask(args->mask);
1353
1354	/* Remove us, and any offline CPUs. */
1355 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1356 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001357
1358 if (va == TLB_FLUSH_ALL) {
1359 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1360 } else {
1361 args->op.cmd = MMUEXT_INVLPG_MULTI;
1362 args->op.arg1.linear_addr = va;
1363 }
1364
1365 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1366
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001367 xen_mc_issue(PARAVIRT_LAZY_MMU);
1368}
1369
1370static unsigned long xen_read_cr3(void)
1371{
1372 return percpu_read(xen_cr3);
1373}
1374
1375static void set_current_cr3(void *v)
1376{
1377 percpu_write(xen_current_cr3, (unsigned long)v);
1378}
1379
1380static void __xen_write_cr3(bool kernel, unsigned long cr3)
1381{
1382 struct mmuext_op *op;
1383 struct multicall_space mcs;
1384 unsigned long mfn;
1385
1386 if (cr3)
1387 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1388 else
1389 mfn = 0;
1390
1391 WARN_ON(mfn == 0 && kernel);
1392
1393 mcs = __xen_mc_entry(sizeof(*op));
1394
1395 op = mcs.args;
1396 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1397 op->arg1.mfn = mfn;
1398
1399 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1400
1401 if (kernel) {
1402 percpu_write(xen_cr3, cr3);
1403
1404 /* Update xen_current_cr3 once the batch has actually
1405 been submitted. */
1406 xen_mc_callback(set_current_cr3, (void *)cr3);
1407 }
1408}
1409
1410static void xen_write_cr3(unsigned long cr3)
1411{
1412 BUG_ON(preemptible());
1413
1414 xen_mc_batch(); /* disables interrupts */
1415
1416	/* Update while interrupts are disabled, so it's atomic with
1417	   respect to IPIs */
1418 percpu_write(xen_cr3, cr3);
1419
1420 __xen_write_cr3(true, cr3);
1421
1422#ifdef CONFIG_X86_64
1423 {
1424 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1425 if (user_pgd)
1426 __xen_write_cr3(false, __pa(user_pgd));
1427 else
1428 __xen_write_cr3(false, 0);
1429 }
1430#endif
1431
1432 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1433}
1434
1435static int xen_pgd_alloc(struct mm_struct *mm)
1436{
1437 pgd_t *pgd = mm->pgd;
1438 int ret = 0;
1439
1440 BUG_ON(PagePinned(virt_to_page(pgd)));
1441
1442#ifdef CONFIG_X86_64
1443 {
1444 struct page *page = virt_to_page(pgd);
1445 pgd_t *user_pgd;
1446
1447 BUG_ON(page->private != 0);
1448
1449 ret = -ENOMEM;
1450
1451 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1452 page->private = (unsigned long)user_pgd;
1453
1454 if (user_pgd != NULL) {
1455 user_pgd[pgd_index(VSYSCALL_START)] =
1456 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1457 ret = 0;
1458 }
1459
1460 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1461 }
1462#endif
1463
1464 return ret;
1465}
1466
1467static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1468{
1469#ifdef CONFIG_X86_64
1470 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1471
1472 if (user_pgd)
1473 free_page((unsigned long)user_pgd);
1474#endif
1475}
1476
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001477static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1478{
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001479 unsigned long pfn = pte_pfn(pte);
1480
1481#ifdef CONFIG_X86_32
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001482 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1483 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1484 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1485 pte_val_ma(pte));
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001486#endif
1487
1488 /*
1489 * If the new pfn is within the range of the newly allocated
1490 * kernel pagetable, and it isn't being mapped into an
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001491 * early_ioremap fixmap slot as a freshly allocated page, make sure
1492 * it is RO.
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001493 */
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001494 if (((!is_early_ioremap_ptep(ptep) &&
1495 pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
1496 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001497 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001498
1499 return pte;
1500}
1501
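/*
 * Illustration only (a sketch, never called): the RO decision made in
 * mask_rw_pte() above can be read as a single predicate over the pfn and
 * the kind of pte slot being written.  The names below mirror the
 * variables already used in mask_rw_pte(); nothing here is new kernel API.
 */
static inline bool __maybe_unused pte_wants_ro_example(pte_t *ptep,
							unsigned long pfn)
{
	if (is_early_ioremap_ptep(ptep))
		/* early_ioremap slots: everything except the page
		   currently being set up (pgt_buf_end - 1) must be RO */
		return pfn != (pgt_buf_end - 1);

	/* normal slots: only the freshly allocated pagetable pages are RO */
	return pfn >= pgt_buf_start && pfn < pgt_buf_end;
}
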
1502/* Init-time set_pte while constructing initial pagetables, which
1503 doesn't allow RO pagetable pages to be remapped RW */
1504static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1505{
1506 pte = mask_rw_pte(ptep, pte);
1507
1508 xen_set_pte(ptep, pte);
1509}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001510
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001511static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1512{
1513 struct mmuext_op op;
1514 op.cmd = cmd;
1515 op.arg1.mfn = pfn_to_mfn(pfn);
1516 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1517 BUG();
1518}
1519
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001520/* Early in boot, while setting up the initial pagetable, assume
1521 everything is pinned. */
1522static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1523{
1524#ifdef CONFIG_FLATMEM
1525 BUG_ON(mem_map); /* should only be used early */
1526#endif
1527 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001528 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1529}
1530
1531/* Used for pmd and pud */
1532static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1533{
1534#ifdef CONFIG_FLATMEM
1535 BUG_ON(mem_map); /* should only be used early */
1536#endif
1537 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001538}
1539
1540/* Early release_pte assumes that all pts are pinned, since there's
1541 only init_mm and anything attached to that is pinned. */
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001542static __init void xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001543{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001544 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001545 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1546}
1547
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001548static __init void xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001549{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001550 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001551}
1552
1553/* This needs to make sure the new pte page is pinned iff it's being
1554   attached to a pinned pagetable. */
1555static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1556{
1557 struct page *page = pfn_to_page(pfn);
1558
1559 if (PagePinned(virt_to_page(mm->pgd))) {
1560 SetPagePinned(page);
1561
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001562 if (!PageHighMem(page)) {
1563 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1564 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1565 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1566 } else {
1567 /* make sure there are no stray mappings of
1568 this page */
1569 kmap_flush_unused();
1570 }
1571 }
1572}
1573
1574static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1575{
1576 xen_alloc_ptpage(mm, pfn, PT_PTE);
1577}
1578
1579static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1580{
1581 xen_alloc_ptpage(mm, pfn, PT_PMD);
1582}
1583
1584/* This should never be called until we're OK to use struct page */
1585static void xen_release_ptpage(unsigned long pfn, unsigned level)
1586{
1587 struct page *page = pfn_to_page(pfn);
1588
1589 if (PagePinned(page)) {
1590 if (!PageHighMem(page)) {
1591 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1592 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1593 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1594 }
1595 ClearPagePinned(page);
1596 }
1597}
1598
1599static void xen_release_pte(unsigned long pfn)
1600{
1601 xen_release_ptpage(pfn, PT_PTE);
1602}
1603
1604static void xen_release_pmd(unsigned long pfn)
1605{
1606 xen_release_ptpage(pfn, PT_PMD);
1607}
1608
1609#if PAGETABLE_LEVELS == 4
1610static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1611{
1612 xen_alloc_ptpage(mm, pfn, PT_PUD);
1613}
1614
1615static void xen_release_pud(unsigned long pfn)
1616{
1617 xen_release_ptpage(pfn, PT_PUD);
1618}
1619#endif
1620
1621void __init xen_reserve_top(void)
1622{
1623#ifdef CONFIG_X86_32
1624 unsigned long top = HYPERVISOR_VIRT_START;
1625 struct xen_platform_parameters pp;
1626
1627 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1628 top = pp.virt_start;
1629
1630 reserve_top_address(-top);
1631#endif /* CONFIG_X86_32 */
1632}
1633
1634/*
1635 * Like __va(), but returns the address in the kernel mapping (which is
1636 * all we have until the physical memory mapping has been set up).
1637 */
1638static void *__ka(phys_addr_t paddr)
1639{
1640#ifdef CONFIG_X86_64
1641 return (void *)(paddr + __START_KERNEL_map);
1642#else
1643 return __va(paddr);
1644#endif
1645}
1646
1647/* Convert a machine address to physical address */
1648static unsigned long m2p(phys_addr_t maddr)
1649{
1650 phys_addr_t paddr;
1651
1652 maddr &= PTE_PFN_MASK;
1653 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1654
1655 return paddr;
1656}
1657
1658/* Convert a machine address to kernel virtual */
1659static void *m2v(phys_addr_t maddr)
1660{
1661 return __ka(m2p(maddr));
1662}
1663
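/*
 * Illustration only (a sketch, never called): the opposite direction to
 * m2p()/m2v() above.  Going from a kernel virtual address to a machine
 * address is __pa() followed by a pfn-to-mfn lookup; this is the
 * conversion the pte constructors in this file perform implicitly.
 */
static phys_addr_t __maybe_unused v2m_example(void *vaddr)
{
	unsigned long pfn = PFN_DOWN(__pa(vaddr));

	return (phys_addr_t)pfn_to_mfn(pfn) << PAGE_SHIFT;
}
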
Juan Quintela4ec53872010-09-02 15:45:43 +01001664/* Set the page permissions on an identity-mapped page */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001665static void set_page_prot(void *addr, pgprot_t prot)
1666{
1667 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1668 pte_t pte = pfn_pte(pfn, prot);
1669
1670 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1671 BUG();
1672}
1673
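/*
 * Illustration only (a sketch, never called): how set_page_prot() is
 * typically combined with pin_pagetable_pfn() further down.  A page must
 * be mapped read-only everywhere before Xen will accept it as a pinned
 * pagetable page, so callers drop write permission first and then pin.
 */
static void __maybe_unused make_ro_and_pin_l1_example(void *pte_page)
{
	set_page_prot(pte_page, PAGE_KERNEL_RO);
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, PFN_DOWN(__pa(pte_page)));
}
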
1674static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1675{
1676 unsigned pmdidx, pteidx;
1677 unsigned ident_pte;
1678 unsigned long pfn;
1679
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001680 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1681 PAGE_SIZE);
1682
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001683 ident_pte = 0;
1684 pfn = 0;
1685 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1686 pte_t *pte_page;
1687
1688 /* Reuse or allocate a page of ptes */
1689 if (pmd_present(pmd[pmdidx]))
1690 pte_page = m2v(pmd[pmdidx].pmd);
1691 else {
1692 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001693 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001694 break;
1695
1696 pte_page = &level1_ident_pgt[ident_pte];
1697 ident_pte += PTRS_PER_PTE;
1698
1699 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1700 }
1701
1702 /* Install mappings */
1703 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1704 pte_t pte;
1705
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001706 if (!pte_none(pte_page[pteidx]))
1707 continue;
1708
1709 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1710 pte_page[pteidx] = pte;
1711 }
1712 }
1713
1714 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1715 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1716
1717 set_page_prot(pmd, PAGE_KERNEL_RO);
1718}
1719
Ian Campbell7e775062010-09-30 12:37:26 +01001720void __init xen_setup_machphys_mapping(void)
1721{
1722 struct xen_machphys_mapping mapping;
1723 unsigned long machine_to_phys_nr_ents;
1724
1725 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1726 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1727 machine_to_phys_nr_ents = mapping.max_mfn + 1;
1728 } else {
1729 machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
1730 }
1731 machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
1732}
1733
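/*
 * Illustration only (a sketch, never called): the table set up above is
 * what the mfn-to-pfn direction ultimately reads.  The real mfn_to_pfn()
 * adds feature and bounds checks, but at its core it is a single lookup
 * in the hypervisor-provided machine_to_phys_mapping array.
 */
static unsigned long __maybe_unused m2p_lookup_example(unsigned long mfn)
{
	return machine_to_phys_mapping[mfn];
}
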
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001734#ifdef CONFIG_X86_64
1735static void convert_pfn_mfn(void *v)
1736{
1737 pte_t *pte = v;
1738 int i;
1739
1740 /* All levels are converted the same way, so just treat them
1741 as ptes. */
1742 for (i = 0; i < PTRS_PER_PTE; i++)
1743 pte[i] = xen_make_pte(pte[i].pte);
1744}
1745
1746/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001747 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001748 *
1749 * We can construct this by grafting the Xen provided pagetable into
1750 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1751 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1752 * means that only the kernel has a physical mapping to start with -
1753 * but that's enough to get __va working. We need to fill in the rest
1754 * of the physical mapping once some sort of allocator has been set
1755 * up.
1756 */
1757__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1758 unsigned long max_pfn)
1759{
1760 pud_t *l3;
1761 pmd_t *l2;
1762
Stefano Stabellini14988a42011-02-18 11:32:40 +00001763 /* max_pfn_mapped is the last pfn mapped in the initial memory
1764	 * mappings. On Xen, the mappings after the kernel image cover some
1765	 * pages that don't exist in pfn space, so we set max_pfn_mapped to
1766	 * the last real pfn mapped. */
1767 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1768
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001769 /* Zap identity mapping */
1770 init_level4_pgt[0] = __pgd(0);
1771
1772 /* Pre-constructed entries are in pfn, so convert to mfn */
1773 convert_pfn_mfn(init_level4_pgt);
1774 convert_pfn_mfn(level3_ident_pgt);
1775 convert_pfn_mfn(level3_kernel_pgt);
1776
1777 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1778 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1779
1780 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1781 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1782
1783 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1784 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1785 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1786
1787 /* Set up identity map */
1788 xen_map_identity_early(level2_ident_pgt, max_pfn);
1789
1790 /* Make pagetable pieces RO */
1791 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1792 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1793 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1794 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1795 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1796 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1797
1798 /* Pin down new L4 */
1799 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1800 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1801
1802 /* Unpin Xen-provided one */
1803 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1804
1805 /* Switch over */
1806 pgd = init_level4_pgt;
1807
1808 /*
1809 * At this stage there can be no user pgd, and no page
1810 * structure to attach it to, so make sure we just set kernel
1811 * pgd.
1812 */
1813 xen_mc_batch();
1814 __xen_write_cr3(true, __pa(pgd));
1815 xen_mc_issue(PARAVIRT_LAZY_CPU);
1816
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07001817 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001818 __pa(xen_start_info->pt_base +
1819 xen_start_info->nr_pt_frames * PAGE_SIZE),
1820 "XEN PAGETABLES");
1821
1822 return pgd;
1823}
1824#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001825static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1826static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1827
1828static __init void xen_write_cr3_init(unsigned long cr3)
1829{
1830 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1831
1832 BUG_ON(read_cr3() != __pa(initial_page_table));
1833 BUG_ON(cr3 != __pa(swapper_pg_dir));
1834
1835 /*
1836 * We are switching to swapper_pg_dir for the first time (from
1837 * initial_page_table) and therefore need to mark that page
1838 * read-only and then pin it.
1839 *
1840 * Xen disallows sharing of kernel PMDs for PAE
1841 * guests. Therefore we must copy the kernel PMD from
1842 * initial_page_table into a new kernel PMD to be used in
1843 * swapper_pg_dir.
1844 */
1845 swapper_kernel_pmd =
1846 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1847 memcpy(swapper_kernel_pmd, initial_kernel_pmd,
1848 sizeof(pmd_t) * PTRS_PER_PMD);
1849 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1850 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1851 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1852
1853 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1854 xen_write_cr3(cr3);
1855 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1856
1857 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1858 PFN_DOWN(__pa(initial_page_table)));
1859 set_page_prot(initial_page_table, PAGE_KERNEL);
1860 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1861
1862 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1863}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001864
1865__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1866 unsigned long max_pfn)
1867{
1868 pmd_t *kernel_pmd;
1869
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001870 initial_kernel_pmd =
1871 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001872
Stefano Stabellini14988a42011-02-18 11:32:40 +00001873 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001874
1875 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001876 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001877
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001878 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001879
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001880 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1881 initial_page_table[KERNEL_PGD_BOUNDARY] =
1882 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001883
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001884 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1885 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001886 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1887
1888 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1889
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001890 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1891 PFN_DOWN(__pa(initial_page_table)));
1892 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001893
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07001894 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge33df4db2009-05-07 11:56:44 -07001895 __pa(xen_start_info->pt_base +
1896 xen_start_info->nr_pt_frames * PAGE_SIZE),
1897 "XEN PAGETABLES");
1898
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001899 return initial_page_table;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001900}
1901#endif /* CONFIG_X86_64 */
1902
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001903static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1904
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07001905static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001906{
1907 pte_t pte;
1908
1909 phys >>= PAGE_SHIFT;
1910
1911 switch (idx) {
1912 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1913#ifdef CONFIG_X86_F00F_BUG
1914 case FIX_F00F_IDT:
1915#endif
1916#ifdef CONFIG_X86_32
1917 case FIX_WP_TEST:
1918 case FIX_VDSO:
1919# ifdef CONFIG_HIGHMEM
1920 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1921# endif
1922#else
1923 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1924#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08001925 case FIX_TEXT_POKE0:
1926 case FIX_TEXT_POKE1:
1927 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001928 pte = pfn_pte(phys, prot);
1929 break;
1930
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001931#ifdef CONFIG_X86_LOCAL_APIC
1932 case FIX_APIC_BASE: /* maps dummy local APIC */
1933 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1934 break;
1935#endif
1936
1937#ifdef CONFIG_X86_IO_APIC
1938 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
1939 /*
1940 * We just don't map the IO APIC - all access is via
1941 * hypercalls. Keep the address in the pte for reference.
1942 */
1943 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1944 break;
1945#endif
1946
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001947 case FIX_PARAVIRT_BOOTMAP:
1948 /* This is an MFN, but it isn't an IO mapping from the
1949 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001950 pte = mfn_pte(phys, prot);
1951 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001952
1953 default:
1954 /* By default, set_fixmap is used for hardware mappings */
1955 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
1956 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001957 }
1958
1959 __native_set_fixmap(idx, pte);
1960
1961#ifdef CONFIG_X86_64
1962 /* Replicate changes to map the vsyscall page into the user
1963 pagetable vsyscall mapping. */
1964 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1965 unsigned long vaddr = __fix_to_virt(idx);
1966 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1967 }
1968#endif
1969}
1970
Juan Quintela4ec53872010-09-02 15:45:43 +01001971__init void xen_ident_map_ISA(void)
1972{
1973 unsigned long pa;
1974
1975 /*
1976	 * If we're dom0, then linearly map the ISA machine addresses into
1977 * the kernel's address space.
1978 */
1979 if (!xen_initial_domain())
1980 return;
1981
1982 xen_raw_printk("Xen: setup ISA identity maps\n");
1983
1984 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1985 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1986
1987 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1988 BUG();
1989 }
1990
1991 xen_flush_tlb();
1992}
1993
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001994static __init void xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001995{
Konrad Rzeszutek Wilkfc251512010-12-23 16:25:29 -05001996#ifdef CONFIG_XEN_DEBUG
1997 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
1998#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001999 pv_mmu_ops.set_pte = xen_set_pte;
2000 pv_mmu_ops.set_pmd = xen_set_pmd;
2001 pv_mmu_ops.set_pud = xen_set_pud;
2002#if PAGETABLE_LEVELS == 4
2003 pv_mmu_ops.set_pgd = xen_set_pgd;
2004#endif
2005
2006 /* This will work as long as patching hasn't happened yet
2007 (which it hasn't) */
2008 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2009 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2010 pv_mmu_ops.release_pte = xen_release_pte;
2011 pv_mmu_ops.release_pmd = xen_release_pmd;
2012#if PAGETABLE_LEVELS == 4
2013 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2014 pv_mmu_ops.release_pud = xen_release_pud;
2015#endif
2016
2017#ifdef CONFIG_X86_64
2018 SetPagePinned(virt_to_page(level3_user_vsyscall));
2019#endif
2020 xen_mark_init_mm_pinned();
2021}
2022
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002023static void xen_leave_lazy_mmu(void)
2024{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002025 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002026 xen_mc_flush();
2027 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002028 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002029}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002030
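/*
 * Illustration only (a sketch, never called): how the lazy-MMU hooks wired
 * up below are used by generic pagetable code.  A run of pte updates is
 * bracketed by enter/leave so that the multicall queue built up in lazy
 * mode is flushed once, in xen_leave_lazy_mmu(), instead of per update.
 */
static void __maybe_unused lazy_pte_update_example(pte_t *ptep, pte_t pteval,
						   unsigned int count)
{
	unsigned int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < count; i++)
		set_pte(ptep + i, pteval);
	arch_leave_lazy_mmu_mode();
}
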
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002031static const struct pv_mmu_ops xen_mmu_ops __initdata = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002032 .read_cr2 = xen_read_cr2,
2033 .write_cr2 = xen_write_cr2,
2034
2035 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002036#ifdef CONFIG_X86_32
2037 .write_cr3 = xen_write_cr3_init,
2038#else
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002039 .write_cr3 = xen_write_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002040#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002041
2042 .flush_tlb_user = xen_flush_tlb,
2043 .flush_tlb_kernel = xen_flush_tlb,
2044 .flush_tlb_single = xen_flush_tlb_single,
2045 .flush_tlb_others = xen_flush_tlb_others,
2046
2047 .pte_update = paravirt_nop,
2048 .pte_update_defer = paravirt_nop,
2049
2050 .pgd_alloc = xen_pgd_alloc,
2051 .pgd_free = xen_pgd_free,
2052
2053 .alloc_pte = xen_alloc_pte_init,
2054 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002055 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002056 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002057
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002058 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002059 .set_pte_at = xen_set_pte_at,
2060 .set_pmd = xen_set_pmd_hyper,
2061
2062 .ptep_modify_prot_start = __ptep_modify_prot_start,
2063 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2064
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002065 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2066 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002067
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002068 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2069 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002070
2071#ifdef CONFIG_X86_PAE
2072 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002073 .pte_clear = xen_pte_clear,
2074 .pmd_clear = xen_pmd_clear,
2075#endif /* CONFIG_X86_PAE */
2076 .set_pud = xen_set_pud_hyper,
2077
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002078 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2079 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002080
2081#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002082 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2083 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002084 .set_pgd = xen_set_pgd_hyper,
2085
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002086 .alloc_pud = xen_alloc_pmd_init,
2087 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002088#endif /* PAGETABLE_LEVELS == 4 */
2089
2090 .activate_mm = xen_activate_mm,
2091 .dup_mmap = xen_dup_mmap,
2092 .exit_mmap = xen_exit_mmap,
2093
2094 .lazy_mode = {
2095 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002096 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002097 },
2098
2099 .set_fixmap = xen_set_fixmap,
2100};
2101
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002102void __init xen_init_mmu_ops(void)
2103{
2104 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2105 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2106 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002107
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002108 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002109}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002110
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002111/* Protected by xen_reservation_lock. */
2112#define MAX_CONTIG_ORDER 9 /* 2MB */
2113static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2114
2115#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2116static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2117 unsigned long *in_frames,
2118 unsigned long *out_frames)
2119{
2120 int i;
2121 struct multicall_space mcs;
2122
2123 xen_mc_batch();
2124 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2125 mcs = __xen_mc_entry(0);
2126
2127 if (in_frames)
2128 in_frames[i] = virt_to_mfn(vaddr);
2129
2130 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002131 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002132
2133 if (out_frames)
2134 out_frames[i] = virt_to_pfn(vaddr);
2135 }
2136 xen_mc_issue(0);
2137}
2138
2139/*
2140 * Update the pfn-to-mfn mappings for a virtual address range, either to
2141 * point to an array of mfns, or contiguously from a single starting
2142 * mfn.
2143 */
2144static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2145 unsigned long *mfns,
2146 unsigned long first_mfn)
2147{
2148 unsigned i, limit;
2149 unsigned long mfn;
2150
2151 xen_mc_batch();
2152
2153 limit = 1u << order;
2154 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2155 struct multicall_space mcs;
2156 unsigned flags;
2157
2158 mcs = __xen_mc_entry(0);
2159 if (mfns)
2160 mfn = mfns[i];
2161 else
2162 mfn = first_mfn + i;
2163
2164 if (i < (limit - 1))
2165 flags = 0;
2166 else {
2167 if (order == 0)
2168 flags = UVMF_INVLPG | UVMF_ALL;
2169 else
2170 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2171 }
2172
2173 MULTI_update_va_mapping(mcs.mc, vaddr,
2174 mfn_pte(mfn, PAGE_KERNEL), flags);
2175
2176 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2177 }
2178
2179 xen_mc_issue(0);
2180}
2181
2182/*
2183 * Perform the hypercall to exchange a region of our pfns to point to
2184 * memory with the required contiguous alignment. Takes the pfns as
2185 * input, and populates mfns as output.
2186 *
2187 * Returns a success code indicating whether the hypervisor was able to
2188 * satisfy the request or not.
2189 */
2190static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2191 unsigned long *pfns_in,
2192 unsigned long extents_out,
2193 unsigned int order_out,
2194 unsigned long *mfns_out,
2195 unsigned int address_bits)
2196{
2197 long rc;
2198 int success;
2199
2200 struct xen_memory_exchange exchange = {
2201 .in = {
2202 .nr_extents = extents_in,
2203 .extent_order = order_in,
2204 .extent_start = pfns_in,
2205 .domid = DOMID_SELF
2206 },
2207 .out = {
2208 .nr_extents = extents_out,
2209 .extent_order = order_out,
2210 .extent_start = mfns_out,
2211 .address_bits = address_bits,
2212 .domid = DOMID_SELF
2213 }
2214 };
2215
2216 BUG_ON(extents_in << order_in != extents_out << order_out);
2217
2218 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2219 success = (exchange.nr_exchanged == extents_in);
2220
2221 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2222 BUG_ON(success && (rc != 0));
2223
2224 return success;
2225}
2226
2227int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2228 unsigned int address_bits)
2229{
2230 unsigned long *in_frames = discontig_frames, out_frame;
2231 unsigned long flags;
2232 int success;
2233
2234 /*
2235 * Currently an auto-translated guest will not perform I/O, nor will
2236 * it require PAE page directories below 4GB. Therefore any calls to
2237 * this function are redundant and can be ignored.
2238 */
2239
2240 if (xen_feature(XENFEAT_auto_translated_physmap))
2241 return 0;
2242
2243 if (unlikely(order > MAX_CONTIG_ORDER))
2244 return -ENOMEM;
2245
2246 memset((void *) vstart, 0, PAGE_SIZE << order);
2247
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002248 spin_lock_irqsave(&xen_reservation_lock, flags);
2249
2250 /* 1. Zap current PTEs, remembering MFNs. */
2251 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2252
2253 /* 2. Get a new contiguous memory extent. */
2254 out_frame = virt_to_pfn(vstart);
2255 success = xen_exchange_memory(1UL << order, 0, in_frames,
2256 1, order, &out_frame,
2257 address_bits);
2258
2259 /* 3. Map the new extent in place of old pages. */
2260 if (success)
2261 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2262 else
2263 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2264
2265 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2266
2267 return success ? 0 : -ENOMEM;
2268}
2269EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2270
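/*
 * Illustration only (a sketch, never called): the typical use of the
 * interface above.  A buffer that is already virtually contiguous is made
 * machine-contiguous below 4GB so a device can DMA to it; real users such
 * as the Xen SWIOTLB code add proper error handling around this.
 */
static int __maybe_unused make_dma_contiguous_example(void *buf,
						      unsigned int order)
{
	/* 32 address bits: ask for an extent below 4GB */
	return xen_create_contiguous_region((unsigned long)buf, order, 32);
}
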
2271void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2272{
2273 unsigned long *out_frames = discontig_frames, in_frame;
2274 unsigned long flags;
2275 int success;
2276
2277 if (xen_feature(XENFEAT_auto_translated_physmap))
2278 return;
2279
2280 if (unlikely(order > MAX_CONTIG_ORDER))
2281 return;
2282
2283 memset((void *) vstart, 0, PAGE_SIZE << order);
2284
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002285 spin_lock_irqsave(&xen_reservation_lock, flags);
2286
2287 /* 1. Find start MFN of contiguous extent. */
2288 in_frame = virt_to_mfn(vstart);
2289
2290 /* 2. Zap current PTEs. */
2291 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2292
2293 /* 3. Do the exchange for non-contiguous MFNs. */
2294 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2295 0, out_frames, 0);
2296
2297 /* 4. Map new pages in place of old pages. */
2298 if (success)
2299 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2300 else
2301 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2302
2303 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2304}
2305EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2306
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002307#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002308static void xen_hvm_exit_mmap(struct mm_struct *mm)
2309{
2310 struct xen_hvm_pagetable_dying a;
2311 int rc;
2312
2313 a.domid = DOMID_SELF;
2314 a.gpa = __pa(mm->pgd);
2315 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2316 WARN_ON_ONCE(rc < 0);
2317}
2318
2319static int is_pagetable_dying_supported(void)
2320{
2321 struct xen_hvm_pagetable_dying a;
2322 int rc = 0;
2323
2324 a.domid = DOMID_SELF;
2325 a.gpa = 0x00;
2326 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2327 if (rc < 0) {
2328 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2329 return 0;
2330 }
2331 return 1;
2332}
2333
2334void __init xen_hvm_init_mmu_ops(void)
2335{
2336 if (is_pagetable_dying_supported())
2337 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2338}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002339#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002340
Ian Campbellde1ef202009-05-21 10:09:46 +01002341#define REMAP_BATCH_SIZE 16
2342
2343struct remap_data {
2344 unsigned long mfn;
2345 pgprot_t prot;
2346 struct mmu_update *mmu_update;
2347};
2348
2349static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2350 unsigned long addr, void *data)
2351{
2352 struct remap_data *rmd = data;
2353 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2354
2355 rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
2356 rmd->mmu_update->val = pte_val_ma(pte);
2357 rmd->mmu_update++;
2358
2359 return 0;
2360}
2361
2362int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2363 unsigned long addr,
2364 unsigned long mfn, int nr,
2365 pgprot_t prot, unsigned domid)
2366{
2367 struct remap_data rmd;
2368 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2369 int batch;
2370 unsigned long range;
2371 int err = 0;
2372
2373 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2374
Stefano Stabellinie060e7a2010-11-11 12:37:43 -08002375 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2376 (VM_PFNMAP | VM_RESERVED | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002377
2378 rmd.mfn = mfn;
2379 rmd.prot = prot;
2380
2381 while (nr) {
2382 batch = min(REMAP_BATCH_SIZE, nr);
2383 range = (unsigned long)batch << PAGE_SHIFT;
2384
2385 rmd.mmu_update = mmu_update;
2386 err = apply_to_page_range(vma->vm_mm, addr, range,
2387 remap_area_mfn_pte_fn, &rmd);
2388 if (err)
2389 goto out;
2390
2391 err = -EFAULT;
2392 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2393 goto out;
2394
2395 nr -= batch;
2396 addr += range;
2397 }
2398
2399 err = 0;
2400out:
2401
2402 flush_tlb_all();
2403
2404 return err;
2405}
2406EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2407
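/*
 * Illustration only (a sketch, never called): the kind of call a privileged
 * backend (e.g. the privcmd driver acting for dom0 userspace) makes with the
 * interface above to map another domain's frames into a vma.  'gmfn' and
 * 'domid' are assumed to come from the caller; nothing here is new API.
 */
static int __maybe_unused map_foreign_frames_example(struct vm_area_struct *vma,
						     unsigned long gmfn,
						     int nr_frames,
						     unsigned domid)
{
	return xen_remap_domain_mfn_range(vma, vma->vm_start, gmfn, nr_frames,
					  vma->vm_page_prot, domid);
}
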
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07002408#ifdef CONFIG_XEN_DEBUG_FS
2409
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -05002410static int p2m_dump_open(struct inode *inode, struct file *filp)
2411{
2412 return single_open(filp, p2m_dump_show, NULL);
2413}
2414
2415static const struct file_operations p2m_dump_fops = {
2416 .open = p2m_dump_open,
2417 .read = seq_read,
2418 .llseek = seq_lseek,
2419 .release = single_release,
2420};
2421
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07002422static struct dentry *d_mmu_debug;
2423
2424static int __init xen_mmu_debugfs(void)
2425{
2426 struct dentry *d_xen = xen_init_debugfs();
2427
2428 if (d_xen == NULL)
2429 return -ENOMEM;
2430
2431 d_mmu_debug = debugfs_create_dir("mmu", d_xen);
2432
2433 debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
2434
2435 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
2436 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
2437 &mmu_stats.pgd_update_pinned);
2438 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
2439 &mmu_stats.pgd_update_pinned);
2440
2441 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
2442 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
2443 &mmu_stats.pud_update_pinned);
2444 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
2445 &mmu_stats.pud_update_pinned);
2446
2447 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
2448 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
2449 &mmu_stats.pmd_update_pinned);
2450 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
2451 &mmu_stats.pmd_update_pinned);
2452
2453 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
2454// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
2455// &mmu_stats.pte_update_pinned);
2456 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
2457 &mmu_stats.pte_update_pinned);
2458
2459 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
2460 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2461 &mmu_stats.mmu_update_extended);
2462 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2463 mmu_stats.mmu_update_histo, 20);
2464
2465 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2466 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2467 &mmu_stats.set_pte_at_batched);
2468 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2469 &mmu_stats.set_pte_at_current);
2470 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2471 &mmu_stats.set_pte_at_kernel);
2472
2473 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2474 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2475 &mmu_stats.prot_commit_batched);
2476
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -05002477 debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07002478 return 0;
2479}
2480fs_initcall(xen_mmu_debugfs);
2481
2482#endif /* CONFIG_XEN_DEBUG_FS */