/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which would end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

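/*
 * The p2m (pfn-to-mfn) table is a simple two-level tree: p2m_top is
 * an array of pointers to pages of p2m entries, and holes in the
 * address space all share the read-only p2m_missing page.  Leaf
 * pages are only allocated (see alloc_p2m below) when a real mapping
 * needs to be stored.
 */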
/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

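/* Look up the mfn for a given pfn; holes and out-of-range pfns yield
   INVALID_P2M_ENTRY. */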
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

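/*
 * Allocate a fresh leaf page of invalid entries and atomically install
 * it at *pp; the cmpxchg against p2m_missing means a racing allocator
 * simply frees its page.  On success, record the new page's mfn in
 * *mfnp for the save/restore structures.
 */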
static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

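/* Record a pfn->mfn mapping, allocating a leaf p2m page on first use. */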
void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

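/* Translate any kernel virtual address into a machine address by
   looking up its pte and extracting the mfn. */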
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

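/* Remap a lowmem kernel page read-only (or back to read-write below)
   via a Xen update_va_mapping hypercall. */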
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

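/*
 * Add an mmu_update to the pending multicall batch: extend the
 * argument list of an in-flight mmu_update multicall if there is one,
 * otherwise start a new one.
 */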
static void extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL)
		mcs.mc->args[1]++;
	else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

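/* Set a pmd entry via a batched mmu_update hypercall. */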
void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

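/*
 * Set a pte.  For ptes in the current or kernel address space we can
 * use the cheaper update_va_mapping interface (batched when in lazy
 * MMU mode); anything else falls back to a direct pte write.
 */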
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
			goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

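/*
 * Commit the pte update; MMU_PT_UPDATE_PRESERVE_AD tells Xen to keep
 * any Accessed/Dirty bits the hardware set in the meantime.
 */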
void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

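/*
 * On PAE the high word is written first: the present bit lives in the
 * low word, so the pte is never observed present with only half the
 * update visible.
 */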
void xen_set_pte(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

void xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pgd_hyper(ptr, val);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit this
	 * ends up making a zero-sized hole, and so is a no-op.
	 */
	hole_low = pgd_index(STACK_TOP_MAX + PGDIR_SIZE - 1);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	flush |= (*func)(virt_to_page(pgd), PT_PGD);

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(pte, PT_PTE);
			}
		}
	}
out:

	return flush;
}

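/* Take the page's pte lock when split pte locks are in use;
   returns NULL otherwise. */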
static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

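/* Queue a pin/unpin mmuext op for the given frame on the current
   multicall batch. */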
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
#endif

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	unpin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
#endif
	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

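/* A pagetable is pinned for as long as its mm is live, so pin it when
   the mm is activated and when it is duplicated at fork. */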
void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

#ifdef CONFIG_X86_64
	active_mm = read_pda(active_mm);
#else
	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
#endif

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}