/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which would end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when it is loaded into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
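
/*
 * Illustrative sketch (not part of the build): a guest constructs a
 * pte by translating the pfn through the p2m table before handing it
 * to Xen, and translates back when reading it, e.g.
 *
 *        pte_t pte = mfn_pte(pfn_to_mfn(pfn), PAGE_KERNEL);
 *        HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG);
 *        ...
 *        unsigned long pfn2 = mfn_to_pfn(pte_mfn(pte));
 *
 * The rest of this file implements the p2m table those helpers use,
 * plus the pinning machinery described above.
 */
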
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES                 (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
        __attribute__((section(".data.page_aligned"))) =
                { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES]
        __attribute__((section(".data.page_aligned"))) =
                { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
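
/*
 * Sketch of the two-level lookup these tables implement (the example
 * numbers assume 4k pages and 4-byte entries, ie 1024 entries/page):
 *
 *        pfn    = 0x12345
 *        topidx = pfn / P2M_ENTRIES_PER_PAGE = 0x48
 *        idx    = pfn % P2M_ENTRIES_PER_PAGE = 0x345
 *        mfn    = p2m_top[topidx][idx]
 *
 * Leaves which were never populated still point at p2m_missing, so a
 * lookup in a hole yields ~0UL, ie INVALID_P2M_ENTRY.
 */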

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES]
        __attribute__((section(".bss.page_aligned")));

static unsigned long p2m_top_mfn_list[
                PAGE_ALIGN(TOP_ENTRIES / P2M_ENTRIES_PER_PAGE)]
        __attribute__((section(".bss.page_aligned")));

static inline unsigned p2m_top_index(unsigned long pfn)
{
        BUG_ON(pfn >= MAX_DOMAIN_PAGES);
        return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
        return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
        unsigned pfn, idx;

        for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);

                p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
        }

        for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
                unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
                p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
        }

        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(p2m_top_mfn_list);
        HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}
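
/*
 * Illustrative note: the save/restore machinery finds the p2m through
 * three levels of mfns, all built above:
 *
 *        shared_info->arch.pfn_to_mfn_frame_list_list
 *                            -> the page holding p2m_top_mfn_list[]
 *        p2m_top_mfn_list[]  -> mfns of the pages of p2m_top_mfn[]
 *        p2m_top_mfn[]       -> mfns of the individual p2m pages
 *
 * so the whole pfn->mfn mapping is reachable without relying on any
 * guest virtual addresses.
 */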

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned pfn;

        for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);

                p2m_top[topidx] = &mfn_list[pfn];
        }
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
        unsigned topidx, idx;

        if (unlikely(pfn >= MAX_DOMAIN_PAGES))
                return INVALID_P2M_ENTRY;

        topidx = p2m_top_index(pfn);
        idx = p2m_index(pfn);
        return p2m_top[topidx][idx];
}

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
        unsigned long *p;
        unsigned i;

        p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
        BUG_ON(p == NULL);

        for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
                p[i] = INVALID_P2M_ENTRY;

        /* Another cpu may have installed a leaf concurrently; if we
           lose the race, just free our page again. */
        if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
                free_page((unsigned long)p);
        else
                *mfnp = virt_to_mfn(p);
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        unsigned topidx, idx;

        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
                return;
        }

        if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return;
        }

        topidx = p2m_top_index(pfn);
        if (p2m_top[topidx] == p2m_missing) {
                /* no need to allocate a page to store an invalid entry */
                if (mfn == INVALID_P2M_ENTRY)
                        return;
                alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
        }

        idx = p2m_index(pfn);
        p2m_top[topidx][idx] = mfn;
}

xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
        unsigned int level;
        pte_t *pte = lookup_address(address, &level);
        unsigned offset = address & ~PAGE_MASK;        /* offset within the page */

        BUG_ON(pte == NULL);

        return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);

        ptev = pte_wrprotect(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);

        ptev = pte_mkwrite(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
        u->ptr = virt_to_machine(ptr).maddr;
        u->val = pmd_val_ma(val);
        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}
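
/*
 * A note on the multicall pattern above, which recurs throughout this
 * file: xen_mc_entry() reserves argument space in a per-cpu batch,
 * the MULTI_* helper fills in one hypercall, and xen_mc_issue(mode)
 * flushes the batch immediately unless the given lazy mode is active,
 * in which case the call stays queued so that several pagetable
 * updates can be handed to Xen in a single hypercall.
 */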

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <mfn,flags> stored as-is, to permit clearing entries */
        xen_set_pte(pte, mfn_pte(mfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
{
        /* updates to init_mm may be done without lock */
        if (mm == &init_mm)
                preempt_disable();

        if (mm == current->mm || mm == &init_mm) {
                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                        struct multicall_space mcs;
                        mcs = xen_mc_entry(0);

                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                        goto out;
                } else
                        if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
                                goto out;
        }
        xen_set_pte(ptep, pteval);

out:
        if (mm == &init_mm)
                preempt_enable();
}

/* Only present entries hold an mfn; non-present entries carry other
   data (eg swap entries) and must pass through unchanged. */
pteval_t xen_pte_val(pte_t pte)
{
        pteval_t ret = pte.pte;

        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;

        return ret;
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
        pgdval_t ret = pgd.pgd;
        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
        return ret;
}

pte_t xen_make_pte(pteval_t pte)
{
        if (pte & _PAGE_PRESENT) {
                pte = phys_to_machine(XPADDR(pte)).maddr;
                pte &= ~(_PAGE_PCD | _PAGE_PWT);
        }

        return (pte_t){ .pte = pte };
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
        if (pgd & _PAGE_PRESENT)
                pgd = phys_to_machine(XPADDR(pgd)).maddr;

        return (pgd_t){ pgd };
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
        pmdval_t ret = native_pmd_val(pmd);
        if (ret & _PAGE_PRESENT)
                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
        return ret;
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;
        u->ptr = virt_to_machine(ptr).maddr;
        u->val = pud_val_ma(val);
        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
        /* The present bit lives in the low word, so write the high
           word first: a previously-clear pte never becomes visible
           half-written. */
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;
}

void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_64bit((u64 *)ptep, pte_val_ma(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        ptep->pte_low = 0;
        smp_wmb();                /* make sure low gets written first */
        ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
        xen_set_pmd(pmdp, __pmd(0));
}

pmd_t xen_make_pmd(pmdval_t pmd)
{
        if (pmd & _PAGE_PRESENT)
                pmd = phys_to_machine(XPADDR(pmd)).maddr;

        return native_make_pmd(pmd);
}

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
 * The important bit is that we don't pin beyond there, because then
 * we start getting into Xen's ptes.
 */
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
                    unsigned long limit)
{
        pgd_t *pgd = pgd_base;
        int flush = 0;
        unsigned long addr = 0;
        unsigned long pgd_next;

        BUG_ON(limit > FIXADDR_TOP);

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
                pud_t *pud;
                unsigned long pud_limit, pud_next;

                pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

                if (!pgd_val(*pgd))
                        continue;

                pud = pud_offset(pgd, 0);

                if (PTRS_PER_PUD > 1) /* not folded */
                        flush |= (*func)(virt_to_page(pud), PT_PUD);

                for (; addr != pud_limit; pud++, addr = pud_next) {
                        pmd_t *pmd;
                        unsigned long pmd_limit;

                        pud_next = pud_addr_end(addr, pud_limit);

                        if (pud_next < limit)
                                pmd_limit = pud_next;
                        else
                                pmd_limit = limit;

                        if (pud_none(*pud))
                                continue;

                        pmd = pmd_offset(pud, 0);

                        if (PTRS_PER_PMD > 1) /* not folded */
                                flush |= (*func)(virt_to_page(pmd), PT_PMD);

                        for (; addr != pmd_limit; pmd++) {
                                addr += (PAGE_SIZE * PTRS_PER_PTE);
                                if ((pmd_limit-1) < (addr-1)) {
                                        addr = pmd_limit;
                                        break;
                                }

                                if (pmd_none(*pmd))
                                        continue;

                                flush |= (*func)(pmd_page(*pmd), PT_PTE);
                        }
                }
        }

        flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

        return flush;
}
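
/*
 * For illustration, the two walks this file actually performs: a
 * process pagetable is pinned only up to TASK_SIZE, while the
 * boot-time bookkeeping pass over init_mm covers up to FIXADDR_TOP:
 *
 *        pgd_walk(mm->pgd, pin_page, TASK_SIZE);
 *        pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
 */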

static spinlock_t *lock_pte(struct page *page)
{
        spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
        ptl = __pte_lockptr(page);
        spin_lock(ptl);
#endif

        return ptl;
}

static void do_unlock(void *v)
{
        spinlock_t *ptl = v;
        spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = level;
        op->arg1.mfn = pfn_to_mfn(pfn);
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int pin_page(struct page *page, enum pt_level level)
{
        unsigned pgfl = TestSetPagePinned(page);
        int flush;

        if (pgfl)
                flush = 0;                /* already pinned */
        else if (PageHighMem(page))
                /* kmaps need flushing if we found an unpinned
                   highpage */
                flush = 1;
        else {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);
                spinlock_t *ptl;

                flush = 0;

                ptl = NULL;
                if (level == PT_PTE)
                        ptl = lock_pte(page);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL_RO),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (level == PT_PTE)
                        xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

                if (ptl) {
                        /* Queue a deferred unlock for when this batch
                           is completed. */
                        xen_mc_callback(do_unlock, ptl);
                }
        }

        return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
        xen_mc_batch();

        if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
                /* re-enable interrupts for kmap_flush_unused */
                xen_mc_issue(0);
                kmap_flush_unused();
                xen_mc_batch();
        }

        xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
        xen_mc_issue(0);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
        unsigned long flags;
        struct page *page;

        spin_lock_irqsave(&pgd_lock, flags);

        list_for_each_entry(page, &pgd_list, lru) {
                if (!PagePinned(page)) {
                        xen_pgd_pin((pgd_t *)page_address(page));
                        SetPageSavePinned(page);
                }
        }

        spin_unlock_irqrestore(&pgd_lock, flags);
}

/* The init_mm pagetable is really pinned as soon as it's created, but
   that's before we have page structures to store the bits.  So do all
   the book-keeping now. */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
        SetPagePinned(page);
        return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
        pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
        unsigned pgfl = TestClearPagePinned(page);

        if (pgfl && !PageHighMem(page)) {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                spinlock_t *ptl = NULL;
                struct multicall_space mcs;

                if (level == PT_PTE) {
                        ptl = lock_pte(page);

                        xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
                }

                mcs = __xen_mc_entry(0);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (ptl) {
                        /* unlock when batch completed */
                        xen_mc_callback(do_unlock, ptl);
                }
        }

        return 0;                /* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
        xen_mc_batch();

        xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        pgd_walk(pgd, unpin_page, TASK_SIZE);

        xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
        unsigned long flags;
        struct page *page;

        spin_lock_irqsave(&pgd_lock, flags);

        list_for_each_entry(page, &pgd_list, lru) {
                if (PageSavePinned(page)) {
                        BUG_ON(!PagePinned(page));
                        printk("unpinning pinned %p\n", page_address(page));
                        xen_pgd_unpin((pgd_t *)page_address(page));
                        ClearPageSavePinned(page);
                }
        }

        spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        spin_lock(&next->page_table_lock);
        xen_pgd_pin(next->pgd);
        spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);
        xen_pgd_pin(mm->pgd);
        spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
        struct mm_struct *mm = info;

        if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
                leave_mm(smp_processor_id());

        /* If this cpu still has a stale cr3 reference, then make sure
           it has been flushed. */
        if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
                load_cr3(swapper_pg_dir);
                arch_flush_lazy_cpu_mode();
        }
}

static void drop_mm_ref(struct mm_struct *mm)
{
        cpumask_t mask;
        unsigned cpu;

        if (current->active_mm == mm) {
                if (current->mm == mm)
                        load_cr3(swapper_pg_dir);
                else
                        leave_mm(smp_processor_id());
                arch_flush_lazy_cpu_mode();
        }

        /* Get the "official" set of cpus referring to our pagetable. */
        mask = mm->cpu_vm_mask;

        /* It's possible that a vcpu may have a stale reference to our
           cr3, because it's in lazy mode and hasn't yet flushed its
           set of pending hypercalls.  In this case, we can look at
           its actual current cr3 value, and force it to flush if
           needed. */
        for_each_online_cpu(cpu) {
                if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
                        cpu_set(cpu, mask);
        }

        if (!cpus_empty(mask))
                xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
        if (current->active_mm == mm)
                load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
        get_cpu();                /* make sure we don't move around */
        drop_mm_ref(mm);
        put_cpu();

        spin_lock(&mm->page_table_lock);

        /* pgd may not be pinned in the error exit path of execve */
        if (PagePinned(virt_to_page(mm->pgd)))
                xen_pgd_unpin(mm->pgd);

        spin_unlock(&mm->page_table_lock);
}