/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

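/*
 * The p2m table is a simple two-level structure: p2m_top[] holds one
 * pointer per page's worth of p2m entries. For example, on a 32-bit
 * build with 4K pages, P2M_ENTRIES_PER_PAGE is 4096/4 = 1024, so
 * pfn 1000000 is found at p2m_top[976][576] (1000000 = 976*1024 + 576;
 * see p2m_top_index() and p2m_index() below).
 */
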
/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
	__attribute__((section(".data.page_aligned"))) =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES]
	__attribute__((section(".data.page_aligned"))) =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES]
	__attribute__((section(".bss.page_aligned")));

static unsigned long p2m_top_mfn_list[
			PAGE_ALIGN(TOP_ENTRIES / P2M_ENTRIES_PER_PAGE)]
	__attribute__((section(".bss.page_aligned")));

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

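/* Look up the mfn for a pfn; holes and out-of-range pfns yield
   INVALID_P2M_ENTRY. */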
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

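/* Replace the p2m_missing placeholder at *pp with a freshly allocated
   page of invalid entries. The cmpxchg makes concurrent callers safe:
   only one page gets installed, and the loser frees its copy. */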
static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

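/* Translate a kernel virtual address into a machine address by looking
   up its pte in the live pagetable and adding back the offset within
   the page. */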
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

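/* Set a pmd entry via a (possibly batched) mmu_update hypercall; this
   is what we must use once the pmd page is pinned and hence read-only. */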
void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

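/* Set a pte. For the current mm and init_mm we can use an
   update_va_mapping hypercall, batched when in lazy MMU mode;
   otherwise fall back to setting the pte directly. */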
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

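/* ptep_modify_prot_start/commit pair: the commit side uses
   MMU_PT_UPDATE_PRESERVE_AD so that Accessed/Dirty bits the hardware
   set between start and commit are not lost. */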
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u->val = pte_val_ma(pte);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

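/* On PAE, write the high word first and the low word (which holds the
   present bit) last, so a racing lookup never sees a present pte with
   a stale high half; xen_pte_clear below does the reverse. */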
void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

/*
  (Yet another) pagetable walker. This one is intended for pinning a
  pagetable. This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level. It walks the entire pagetable, but it only bothers
  pinning pte pages which are below pte_limit. In the normal case
  this will be TASK_SIZE, but at boot we need to pin up to
  FIXADDR_TOP. But the important bit is that we don't pin beyond
  there, because then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

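/* Take the split pte lock for a pte page, if split pte locks are in
   use; callers queue do_unlock() as a multicall callback so the lock
   is dropped only once the batch has completed. */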
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

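/* Queue a pin/unpin mmuext op for the frame at pfn as part of the
   current multicall batch. */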
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

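/* pgd_walk callback for pinning: mark the page pinned, remap it
   read-only, and pin pte pages as L1 tables. Returns non-zero if an
   unpinned highmem page was found, in which case the caller must
   flush kmaps before issuing the batch. */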
static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits. So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

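/* pgd_walk callback for unpinning: clear the pinned flag and remap the
   page read-write; pte pages are unpinned under their split pte lock,
   which is released when the batch completes. */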
static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk("unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

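/* Pin an mm's pagetable when it becomes active, or when it is
   duplicated by fork; page_table_lock keeps the pagetable stable while
   pgd_walk runs over it. */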
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its set
	   of pending hypercalls. In this case, we can look at its
	   actual current cr3 value, and force it to flush if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}