/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
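
/*
 * For illustration only (the frame numbers are made up): if the
 * domain's pfn 0x100 currently lives in machine frame (mfn) 0x5a3c,
 * then constructing a present, writable pte for pfn 0x100 means
 * installing
 *
 *	pte = ((pteval_t)0x5a3c << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW;
 *
 * and reading it back via __pte_val() converts mfn 0x5a3c back to
 * pfn 0x100 through the m2p table. pte_pfn_to_mfn() and
 * pte_mfn_to_pfn() below implement exactly this swap.
 */
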
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */

/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
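
/*
 * Worked example, for illustration only (the values are
 * config-dependent): on x86-64 with the usual 47-bit user address
 * space, STACK_TOP_MAX is 0x00007ffffffff000 and PGDIR_SIZE is
 * 1UL << 39, so USER_LIMIT rounds up to 0x0000800000000000, that is,
 * pgd_index() 256, the first non-usermode pgd slot.
 */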


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
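
/*
 * Illustrative sketch only (not a real caller; the pfn is made up):
 * a typical round trip through the p2m table. The pfn_to_mfn() and
 * mfn_to_pfn() helpers in asm/xen/page.h are built on these
 * functions.
 *
 *	unsigned long pfn = 0x1234;
 *	unsigned long mfn = get_phys_to_machine(pfn);
 *
 *	if (mfn != INVALID_P2M_ENTRY) {
 *		set_phys_to_machine(pfn, mfn);
 *		BUG_ON(get_phys_to_machine(pfn) != mfn);
 *	}
 */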

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/* An mmu_update multicall is already pending;
		   append this request to its argument list. */
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		/* No pending mmu_update; start a new multicall entry. */
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
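
/*
 * Illustrative sketch only: the shape of the caller the start/commit
 * pair above is designed for (in the generic mm code this is
 * change_pte_range(), reached via the ptep_modify_prot_* paravirt
 * hooks):
 *
 *	pte_t old = ptep_modify_prot_start(mm, addr, ptep);
 *	pte_t new = pte_modify(old, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, new);
 *
 * Because the commit is queued with MMU_PT_UPDATE_PRESERVE_AD, any
 * Accessed/Dirty bits the hardware sets between start and commit are
 * preserved rather than overwritten.
 */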

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	pgd_t *pgd = mm->pgd;
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings. On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}
777
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700778static int xen_pin_page(struct mm_struct *mm, struct page *page,
779 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700780{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700781 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700782 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700783
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700784 if (pgfl)
785 flush = 0; /* already pinned */
786 else if (PageHighMem(page))
787 /* kmaps need flushing if we found an unpinned
788 highpage */
789 flush = 1;
790 else {
791 void *pt = lowmem_page_address(page);
792 unsigned long pfn = page_to_pfn(page);
793 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700794 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700795
796 flush = 0;
797
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700798 /*
799 * We need to hold the pagetable lock between the time
800 * we make the pagetable RO and when we actually pin
801 * it. If we don't, then other users may come in and
802 * attempt to update the pagetable by writing it,
803 * which will fail because the memory is RO but not
804 * pinned, so Xen won't do the trap'n'emulate.
805 *
806 * If we're using split pte locks, we can't hold the
807 * entire pagetable's worth of locks during the
808 * traverse, because we may wrap the preempt count (8
809 * bits). The solution is to mark RO and pin each PTE
810 * page while holding the lock. This means the number
811 * of locks we end up holding is never more than a
812 * batch size (~32 entries, at present).
813 *
814 * If we're not using split pte locks, we needn't pin
815 * the PTE pages independently, because we're
816 * protected by the overall pagetable lock.
817 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700818 ptl = NULL;
819 if (level == PT_PTE)
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700820 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700821
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700822 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
823 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700824 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
825
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700826 if (ptl) {
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700827 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
828
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700829 /* Queue a deferred unlock for when this batch
830 is completed. */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700831 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700832 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700833 }
834
835 return flush;
836}
837
838/* This is called just after a mm has been created, but it has not
839 been used yet. We need to make sure that its pagetable is all
840 read-only, and can be pinned. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700841static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700842{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700843 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700844
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700845 if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700846 /* re-enable interrupts for kmap_flush_unused */
847 xen_mc_issue(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700848 kmap_flush_unused();
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700849 xen_mc_batch();
850 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700851
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700852#ifdef CONFIG_X86_64
853 {
854 pgd_t *user_pgd = xen_get_user_pgd(pgd);
855
856 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
857
858 if (user_pgd) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700859 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700860 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
861 }
862 }
863#else /* CONFIG_X86_32 */
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700864#ifdef CONFIG_X86_PAE
865 /* Need to make sure unshared kernel PMD is pinnable */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700866 xen_pin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
867 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700868#endif
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100869 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700870#endif /* CONFIG_X86_64 */
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700871 xen_mc_issue(0);
872}
873
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700874static void xen_pgd_pin(struct mm_struct *mm)
875{
876 __xen_pgd_pin(mm, mm->pgd);
877}
878
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100879/*
880 * On save, we need to pin all pagetables to make sure they get their
881 * mfns turned into pfns. Search the list for any unpinned pgds and pin
882 * them (unpinned pgds are not currently in use, probably because the
883 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700884 *
885 * Expected to be called in stop_machine() ("equivalent to taking
886 * every spinlock in the system"), so the locking doesn't really
887 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100888 */
889void xen_mm_pin_all(void)
890{
891 unsigned long flags;
892 struct page *page;
893
894 spin_lock_irqsave(&pgd_lock, flags);
895
896 list_for_each_entry(page, &pgd_list, lru) {
897 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700898 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100899 SetPageSavePinned(page);
900 }
901 }
902
903 spin_unlock_irqrestore(&pgd_lock, flags);
904}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
922
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700923static int xen_unpin_page(struct mm_struct *mm, struct page *page,
924 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700925{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700926 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700927
928 if (pgfl && !PageHighMem(page)) {
929 void *pt = lowmem_page_address(page);
930 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700931 spinlock_t *ptl = NULL;
932 struct multicall_space mcs;
933
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700934 /*
935 * Do the converse to pin_page. If we're using split
936 * pte locks, we must be holding the lock for while
937 * the pte page is unpinned but still RO to prevent
938 * concurrent updates from seeing it in this
939 * partially-pinned state.
940 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700941 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700942 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700943
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700944 if (ptl)
945 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700946 }
947
948 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700949
950 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
951 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700952 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
953
954 if (ptl) {
955 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700956 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700957 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700958 }
959
960 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700961}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])),
		       PT_PMD);
#endif

	xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}
996
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100997/*
998 * On resume, undo any pinning done at save, so that the rest of the
999 * kernel doesn't see any unexpected pinned pagetables.
1000 */
1001void xen_mm_unpin_all(void)
1002{
1003 unsigned long flags;
1004 struct page *page;
1005
1006 spin_lock_irqsave(&pgd_lock, flags);
1007
1008 list_for_each_entry(page, &pgd_list, lru) {
1009 if (PageSavePinned(page)) {
1010 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001011 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001012 ClearPageSavePinned(page);
1013 }
1014 }
1015
1016 spin_unlock_irqrestore(&pgd_lock, flags);
1017}
1018
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001019void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1020{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001021 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001022 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001023 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001024}
1025
1026void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1027{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001028 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001029 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001030 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001031}
1032

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

#ifdef CONFIG_X86_64
	active_mm = read_pda(active_mm);
#else
	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
#endif

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode, and it hasn't yet flushed
	   its set of pending hypercalls. In this case, we can
	   look at its actual current cr3 value, and force it to flush
	   if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */