/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when it is loaded into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
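
/*
 * A minimal sketch of the pfn<->mfn split described above.  This is
 * illustrative only and not part of the original file; 'page' is a
 * hypothetical struct page.  The helpers themselves are the real ones
 * from <xen/page.h> and are used throughout this file.
 */
#if 0
	unsigned long pfn = page_to_pfn(page);	/* domain-local "physical" frame */
	unsigned long mfn = pfn_to_mfn(pfn);	/* real machine frame, via the p2m table */
	pte_t pte = mfn_pte(mfn, PAGE_KERNEL);	/* the mfn is what lands in the pte */
#endif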
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;	/* offset within the page, not the page base */

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pmd_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

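	/* xen_mc_issue() flushes to the hypervisor immediately unless we
	   are inside a lazy-MMU section; under PARAVIRT_LAZY_MMU the
	   update stays queued so several updates share one hypercall. */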
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pteval_t xen_pte_val(pte_t pte)
{
	pteval_t ret = pte.pte;

	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;

	return ret;
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	pgdval_t ret = pgd.pgd;
	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return ret;
}

pte_t xen_make_pte(pteval_t pte)
{
	if (pte & _PAGE_PRESENT) {
		pte = phys_to_machine(XPADDR(pte)).maddr;
		pte &= ~(_PAGE_PCD | _PAGE_PWT);
	}

	return (pte_t){ .pte = pte };
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	if (pgd & _PAGE_PRESENT)
		pgd = phys_to_machine(XPADDR(pgd)).maddr;

	return (pgd_t){ pgd };
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	pmdval_t ret = native_pmd_val(pmd);
	if (ret & _PAGE_PRESENT)
		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
	return ret;
}
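
/*
 * Illustrative only, not part of the original file: for a present
 * entry, the make/val pairs above are inverses, modulo the pfn<->mfn
 * translation done in between.  'vaddr' is a hypothetical lowmem
 * kernel address.
 */
#if 0
	pte_t pte = xen_make_pte(__pa(vaddr) | _PAGE_PRESENT);	/* stores an mfn */
	pteval_t val = xen_pte_val(pte);	/* converts back to a pfn-based value */
	BUG_ON(val != (__pa(vaddr) | _PAGE_PRESENT));
#endif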

void xen_set_pud(pud_t *ptr, pud_t val)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;
	u->ptr = virt_to_machine(ptr).maddr;
	u->val = pud_val_ma(val);
	MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
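	/* PAE: write the high word first, then the low word which
	   carries the present bit, so a not-present pte never becomes
	   visible with only half of its new value in place. */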
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	xen_set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	if (pmd & _PAGE_PRESENT)
		pmd = phys_to_machine(XPADDR(pmd)).maddr;

	return native_make_pmd(pmd);
}

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
 * The important bit is that we don't pin beyond there, because then
 * we start getting into Xen's ptes.
 */
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				/* the -1s keep the comparison valid even
				   if addr wrapped to 0 at the top of the
				   address space */
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}
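
/* Typical call (see xen_pgd_pin/xen_pgd_unpin below), applying a pin
   or unpin callback to every page of a task's pagetable:

	flush = pgd_walk(pgd, pin_page, TASK_SIZE);
*/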

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}
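
/* do_unlock() is queued via xen_mc_callback() (see pin_page and
   unpin_page below), so the pte lock is held until the batched
   multicalls have actually been issued to the hypervisor. */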

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at its
	   actual current cr3 value and force a flush if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (PagePinned(virt_to_page(mm->pgd)))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}