/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which would end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
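/*
 * A rough sketch of the translation described above (illustrative
 * only): building a pte from the guest's pfn really stores the
 * corresponding mfn, and reading it back reverses the conversion.
 *
 *	pte_t pte = mfn_pte(pfn_to_mfn(pfn), flags);	pfn -> mfn
 *	unsigned long pfn = mfn_to_pfn(pte_mfn(pte));	mfn -> pfn
 */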
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}
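/*
 * Worked example of the two-level lookup above (illustrative only,
 * assuming a 32-bit kernel with 4K pages): sizeof(unsigned long) is 4,
 * so P2M_ENTRIES_PER_PAGE is 1024, and pfn 0x12345 is found at
 * p2m_top[0x48][0x345].
 */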

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL)
		mcs.mc->args[1]++;
	else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}
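/*
 * Callers follow the pattern below (this is how xen_set_pmd_hyper and
 * xen_set_pud_hyper use it): open a batch, queue one or more updates,
 * then issue the batch so Xen can process the whole set in a single
 * hypercall where possible.
 *
 *	xen_mc_batch();
 *	u.ptr = virt_to_machine(ptr).maddr;
 *	u.val = pmd_val_ma(val);
 *	extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */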

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <mfn,flags> stored as-is, to permit clearing entries */
	xen_set_pte(pte, mfn_pte(mfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If the page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
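/*
 * Ordering note (32-bit PAE): the present bit lives in the low word,
 * so the word-sized writes above and in xen_pte_clear below are
 * ordered such that a pte never transiently looks present with a
 * half-updated mfn: set the high word before the low word when
 * installing an entry, and clear the low word first when tearing one
 * down. xen_set_pte_atomic provides a single indivisible 64-bit store
 * for the cases that need one.
 */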

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

/*
  (Yet another) pagetable walker. This one is intended for pinning a
  pagetable. This means that it walks a pagetable and calls the
  callback function on each page it finds making up the page table,
  at every level. It walks the entire pagetable, but it only bothers
  pinning pte pages which are below limit. In the normal case this
  will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
  But the important bit is that we don't pin beyond there, because
  then we start getting into Xen's ptes.
*/
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk("unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;

	if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls. In this case, we can look at its
	   actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetable, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}